code stringlengths 87 55.2k | code_codestyle int64 0 349 | style_context stringlengths 135 49.1k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
import qiskit
def SCREAMING_SNAKE_CASE(bit_a, bit_b):
    """Simulate a quantum half adder for two classical input bits.

    Args:
        bit_a: first input bit (0 or 1).
        bit_b: second input bit (0 or 1).

    Returns:
        Measurement histogram (counts dict) over 1000 shots; classical
        bit 0 holds the XOR (sum) and classical bit 1 the AND (carry).
    """
    simulator = qiskit.Aer.get_backend("aer_simulator")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit_a == 1:
        qc_ha.x(0)
    if bit_b == 1:  # was testing the first bit twice
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit 2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit 3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
if __name__ == "__main__":
    # Demo: add 1 + 1 on the simulated half adder and print the counts.
    counts = SCREAMING_SNAKE_CASE(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
| 319 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    """Build the UperNetConfig (with a Swin backbone) matching *model_name*.

    Args:
        model_name: one of the ``upernet-swin-{tiny,small,base,large}`` names.

    Returns:
        A fully populated ``UperNetConfig`` with ADE20k label mappings.
    """
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information (ADE20k has 150 semantic classes)
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def create_rename_keys(config):
    """Return (old_key, new_key) pairs mapping mmseg Swin/UperNet parameter
    names to their Hugging Face equivalents, driven by the backbone depths."""
    rename_keys = []
    # fmt: off
    # stem
    rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight"))
    rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias"))
        # the last stage has no downsample layer
        if i < 3:
            rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))
    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    """Split each fused qkv projection of the original checkpoint into
    separate query/key/value entries of *state_dict* (modified in place)."""
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    """Reorder the columns of a 2D downsample-reduction weight from the
    original unfold order to [0, 2, 1, 3] patch order."""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x
def reverse_correct_unfold_reduction_order(x):
    """Inverse of ``correct_unfold_reduction_order`` for 2D reduction weights."""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x
def correct_unfold_norm_order(x):
    """Reorder a 1D downsample-norm parameter from the original unfold
    order to [0, 2, 1, 3] patch order."""
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x
def reverse_correct_unfold_norm_order(x):
    """Inverse of ``correct_unfold_norm_order`` for 1D norm parameters."""
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Download an mmseg Swin+UperNet checkpoint, convert it to the HF format,
    verify the logits on a sample image, and optionally save / push it.

    Args:
        model_name: one of the ``upernet-swin-{tiny,small,base,large}`` names.
        pytorch_dump_folder_path: directory for the converted model, or None.
        push_to_hub: whether to push model and processor to the hub.
    """
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]
    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values (reference slices recorded from the original checkpoints)
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-swin-tiny",
        type=str,
        choices=[f"upernet-swin-{size}" for size in ["tiny", "small", "base", "large"]],
        help="Name of the Swin + UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 278 | 0 |
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__A = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
__A = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
__A = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n"
def normalize_answer(text):
    """Lower *text* and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(text))))
def compute_exact(a_gold, a_pred):
    """Return 1 if gold and prediction match after normalization, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    """Return the exact-match percentage (0-100): a prediction counts as a
    hit when it normalizes equal to any of its references."""
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    """Compute the SARI keep/delete/add F-scores for one n-gram order.

    Args:
        sgrams: n-grams of the source sentence.
        cgrams: n-grams of the candidate (predicted) sentence.
        rgramslist: list of n-gram lists, one per reference.
        numref: number of references.

    Returns:
        Tuple ``(keepscore, delscore_precision, addscore)``.
    """
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    """Compute the sentence-level SARI score.

    Builds 1- to 4-grams for source (*ssent*), candidate (*csent*) and each
    reference in *rsents*, scores every n-gram order with ``SARIngram`` and
    averages keep/delete/add over the four orders.
    """
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase=True, tokenizer="13a", return_str=True):
    """Lowercase and tokenize *sentence* for SARI/BLEU computation.

    Normalization is required for the ASSET dataset (one of the primary
    datasets in sentence simplification) to allow using space
    to split the sentence. Even though Wiki-Auto and TURK datasets
    do not require normalization, we do it for consistency.
    Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    """
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        # sacrebleu 2.x moved the tokenizer registry
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    """Average the sentence-level SARI over the corpus, scaled to 0-100.

    Raises:
        ValueError: if the three lists have different lengths.
    """
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(sources)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    """Corpus BLEU via sacrebleu; every prediction must have the same number
    of references (sacrebleu expects reference streams, one per position)."""
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    # transpose: list-of-refs-per-prediction -> one stream per reference slot
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCAmelCase(datasets.Metric):
    """WIKI_SPLIT metric: combines SARI, sacreBLEU and exact match."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        # Aggregate the three sub-metrics into one result dict.
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
| 177 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result, args):
    """Compute WER/CER for a mapped dataset holding ``target``/``prediction``
    columns, print them, and write them (plus optional per-example logs) to
    text files named after the dataset."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text):
    """Lowercase *text*, strip the punctuation ignored during training, and
    collapse newline/multi-space sequences to single spaces."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))
    return text
def main(args):
    """Run ASR evaluation: load the dataset, transcribe with a pipeline, and
    log WER/CER via ``log_results``."""
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
| 278 | 0 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

# Names referenced by the tokenizer class attributes below.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Return a mapping from each byte value (0-255) to a printable unicode
    character: printable bytes map to themselves, the rest to chars >= 256,
    so byte-level BPE vocabularies avoid whitespace/control characters."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word* (a tuple/str of
    variable-length symbols), as used by the BPE merge loop."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class __snake_case(PreTrainedTokenizer):
    """
    LED tokenizer: byte-level Byte-Pair-Encoding, GPT-2 style.

    Requires a ``vocab.json`` and a ``merges.txt``. A space is treated as part
    of the following token, so a word is encoded differently at the start of a
    sentence than elsewhere; pass ``add_prefix_space=True`` to always encode
    as if a space preceded the text.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        """Size of the base vocabulary (excluding added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to ``token``; return the space-joined BPE symbols."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (most frequent) adjacent pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens back to a single decoded string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Write ``vocab.json`` and ``merges.txt`` into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add <s> ... </s> (and </s></s> between pairs) around the token ids."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """LED does not use token type ids; return a zero mask of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word is encoded like a mid-sentence word."""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ):
        """Pad as usual, then pad ``global_attention_mask`` to match ``input_ids``."""
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
| 219 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class A(PreTrainedTokenizer):
    r"""
    CANINE tokenizer (a character splitter): converts text to a sequence of
    characters, and each character to its Unicode code point id. No vocabulary
    file is needed — the "vocab" is the Unicode codepoint space itself.
    """

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        """The full Unicode codepoint space."""
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into a sequence of single characters."""
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a character token to its Unicode code point id."""
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an id back to its character, or to a special-token name."""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        """Join the character tokens back into a string."""
        return "".join(tokens)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Format as [CLS] A [SEP] (pair: [CLS] A [SEP] B [SEP])."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file to save.
        return ()
| 278 | 0 |
"""Lazy import structure for the Falcon model."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Maps each submodule to the public names it exports.
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only exported when torch is installed.
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 4 |
def solution(limit=1000000):
    """Project Euler 14: return the starting number below ``limit`` that
    produces the longest Collatz chain.

    Chain lengths are memoized in ``counters`` (number -> chain length,
    counting both endpoints) so each number's tail is computed only once.
    """
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for start in range(2, limit):
        counter = 0
        number = start
        while True:
            if number in counters:
                # Rest of the chain is already known.
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if start not in counters:
            counters[start] = counter
        if counter > pre_counter:
            largest_number = start
            pre_counter = counter
    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
| 278 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class _UpperCAmelCase(PretrainedConfig):
    """Configuration for an Open-Llama model.

    Defaults mirror the s-JoL/Open-Llama-V1 checkpoint.
    """

    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # Accept the historical (misspelled) kwarg for backward compatibility.
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration dict.

        Raises ValueError when it is not ``{"type": "linear"|"dynamic",
        "factor": float > 1}``.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 33 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class A(unittest.TestCase):
    """Integration test for the DiT document-image classifier."""

    @slow
    def test_for_image_classification(self):
        """Run a real RVL-CDIP sample through the pretrained model and check the logits."""
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # RVL-CDIP has 16 document classes.
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 278 | 0 |
import heapq


def greedy_min_vertex_cover(graph: dict) -> set:
    """Greedy APX algorithm for minimum vertex cover: repeatedly pick the
    vertex covering the most remaining edges.

    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph) == {0, 1, 2, 4}
    True
    """
    queue: list = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
| 73 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    """Load a T5X checkpoint and return its parameters as a flat dict
    keyed by tuples of path components."""
    flax_params = checkpoints.load_tax_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    """Rename flattened T5X parameter keys to HF Pix2Struct names and convert
    the arrays to torch tensors (weight matrices are transposed; embedding
    tables are kept as-is).
    """
    converted_dict = {}
    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")

            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            # Flax stores linear weights as (in, out); torch expects (out, in).
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pixastruct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    """Convert a T5X Pix2Struct checkpoint into a HF model + processor and save both.

    Args:
        t5x_checkpoint_path: Path to the original T5X checkpoint.
        pytorch_dump_folder_path: Output directory (created if missing).
        use_large: Build the "large" architecture variant.
        is_vqa: Configure the model for VQA-style conditioning.
    """
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = PixaStructVisionConfig()
        decoder_config = PixaStructTextConfig()
    else:
        encoder_config = PixaStructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = PixaStructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = PixaStructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = PixaStructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = PixaStructImageProcessor()
    processor = PixaStructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096

    processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
    args = parser.parse_args()

    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
| 278 | 0 |
from jiwer import compute_measures
import datasets
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'

_DESCRIPTION = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'

_KWARGS_DESCRIPTION = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n    references: List of references for each speech input.\n    predictions: List of transcriptions to score.\n    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n    (float): the word error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> wer = datasets.load_metric("wer")\n    >>> wer_score = wer.compute(predictions=predictions, references=references)\n    >>> print(wer_score)\n    0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class a(datasets.Metric):
    """Word Error Rate metric backed by jiwer's ``compute_measures``."""

    def _info(self):
        """Metric metadata: features, citation and reference links."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        """Return the corpus WER, either on concatenated texts or accumulated pairwise."""
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
| 180 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)
class A(BeitImageProcessor):
    """Deprecated alias of BeitImageProcessor; warns on construction."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 278 | 0 |
import torch
from diffusers import StableDiffusionPipeline
# Load a DreamBooth-fine-tuned Stable Diffusion model and generate one image.
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
# 50 denoising steps with classifier-free guidance scale 7.5 (SD defaults).
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
| 101 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def __UpperCamelCase ( yolos_name ):
    """Build a YolosConfig for the named checkpoint variant.

    Args:
        yolos_name: one of ``yolos_ti``, ``yolos_s_200_pre``, ``yolos_s_300_pre``,
            ``yolos_s_dWr``, ``yolos_base`` (matched by substring below).

    Returns:
        A ``YolosConfig`` with architecture sizes and COCO detection labels set.

    NOTE(review): the original bound every value to a throwaway local instead of
    config attributes, and the id2label comprehension called ``int`` on the
    function argument instead of the key; both restored here.
    """
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]
    # set label information (COCO detection uses 91 classes)
    config.num_labels = 91
    repo_id = '''huggingface/label-files'''
    filename = '''coco-detection-id2label.json'''
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def __UpperCamelCase ( state_dict , config , base_model = False ):
    """Split each layer's fused qkv projection into separate q/k/v entries.

    Pops ``blocks.{i}.attn.qkv.{weight,bias}`` from ``state_dict`` and writes
    query/key/value weights and biases under the HF naming scheme, in place.

    Args:
        state_dict: checkpoint state dict, mutated in place.
        config: object exposing ``num_hidden_layers`` and ``hidden_size``.
        base_model: unused; kept for signature compatibility with sibling
            conversion scripts.

    NOTE(review): the original declared ``_A`` three times (a SyntaxError) and
    assigned every slice to a throwaway local; target keys restored with the
    ``vit.`` prefix used elsewhere in this converter — TODO confirm prefix.
    """
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def __UpperCamelCase ( name ):
    """Translate a timm/YOLOS checkpoint key into the HF Transformers name.

    Each substring match rewrites ``name`` in place and later rules see the
    already-rewritten string, so the order of the checks matters (e.g.
    ``mid_pos_embed`` must be handled before the more general ``pos_embed``).

    NOTE(review): the original referenced ``name`` while the parameter was
    ``_A`` and dropped every ``replace`` result into a throwaway local, so it
    always returned an undefined name; the accumulating rewrites are restored.
    """
    if "backbone" in name:
        name = name.replace('''backbone''' , '''vit''' )
    if "cls_token" in name:
        name = name.replace('''cls_token''' , '''embeddings.cls_token''' )
    if "det_token" in name:
        name = name.replace('''det_token''' , '''embeddings.detection_tokens''' )
    if "mid_pos_embed" in name:
        name = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' )
    if "pos_embed" in name:
        name = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''encoder.layer''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if "class_embed" in name:
        name = name.replace('''class_embed''' , '''class_labels_classifier''' )
    if "bbox_embed" in name:
        name = name.replace('''bbox_embed''' , '''bbox_predictor''' )
    if "vit.norm" in name:
        name = name.replace('''vit.norm''' , '''vit.layernorm''' )
    return name
def __UpperCamelCase ( orig_state_dict , model ):
    """Rewrite an original YOLOS state dict into the HF layout.

    Fused ``qkv`` tensors (keys like ``backbone.blocks.{i}.attn.qkv.*``) are
    split into separate query/key/value entries sized by the model's
    ``all_head_size``; all other entries are carried over under their key.

    Args:
        orig_state_dict: checkpoint dict; mutated in place and returned.
        model: HF model whose ViT encoder provides per-layer head sizes.

    NOTE(review): the original declared ``_A`` twice (a SyntaxError) and
    discarded every popped value. Non-qkv keys are kept unchanged here; the
    upstream script additionally passes them through a rename helper, which in
    this file shares the same scrambled name and cannot be called — TODO wire
    the rename helper back in once the sibling functions are renamed.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('''.''' )
            # keys look like "backbone.blocks.{i}.attn.qkv.*", so index 2 is the layer
            layer_num = int(key_split[2] )
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[key] = val
    return orig_state_dict
def __UpperCamelCase ( ):
    """Download the standard COCO validation image used to sanity-check conversions.

    NOTE(review): the original referenced undefined names ``_A`` and ``im``;
    the URL local and the streamed download are restored.
    """
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def __UpperCamelCase ( _A , _A , _A , _A = False ):
    """Convert a YOLOS checkpoint to the HF format, verify it, and optionally push it.

    NOTE(review): this function is badly scrambled — the signature declares the
    same parameter ``_A`` four times (a SyntaxError), every intermediate result
    is bound to the throwaway ``lowerCAmelCase_``, and the body references
    names (``model``, ``yolos_name``, ``image_processor``, ``outputs``,
    ``logits``, ``pred_boxes``, ``pytorch_dump_folder_path``, ``push_to_hub``,
    ``model_mapping``) that are never defined. It also calls helpers that all
    share the scrambled name ``__UpperCamelCase`` in this file. Left
    byte-identical; restore names before use.
    """
    lowerCAmelCase_ = get_yolos_config(_A )
    # load original state_dict
    lowerCAmelCase_ = torch.load(_A , map_location='''cpu''' )['''model''']
    # load 🤗 model
    lowerCAmelCase_ = YolosForObjectDetection(_A )
    model.eval()
    lowerCAmelCase_ = convert_state_dict(_A , _A )
    model.load_state_dict(_A )
    # Check outputs on an image, prepared by YolosImageProcessor
    lowerCAmelCase_ = 800 if yolos_name != '''yolos_ti''' else 512
    lowerCAmelCase_ = YolosImageProcessor(format='''coco_detection''' , size=_A )
    lowerCAmelCase_ = image_processor(images=prepare_img() , return_tensors='''pt''' )
    lowerCAmelCase_ = model(**_A )
    lowerCAmelCase_ , lowerCAmelCase_ = outputs.logits, outputs.pred_boxes
    # per-variant expected logits/boxes used as conversion checksums below
    lowerCAmelCase_ , lowerCAmelCase_ = None, None
    if yolos_name == "yolos_ti":
        lowerCAmelCase_ = torch.tensor(
            [[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
        lowerCAmelCase_ = torch.tensor(
            [[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
    elif yolos_name == "yolos_s_200_pre":
        lowerCAmelCase_ = torch.tensor(
            [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
        lowerCAmelCase_ = torch.tensor(
            [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
    elif yolos_name == "yolos_s_300_pre":
        lowerCAmelCase_ = torch.tensor(
            [[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
        lowerCAmelCase_ = torch.tensor(
            [[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
    elif yolos_name == "yolos_s_dWr":
        lowerCAmelCase_ = torch.tensor(
            [[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
        lowerCAmelCase_ = torch.tensor(
            [[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
    elif yolos_name == "yolos_base":
        lowerCAmelCase_ = torch.tensor(
            [[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
        lowerCAmelCase_ = torch.tensor(
            [[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}" )
    # verify the converted model reproduces the reference outputs
    assert torch.allclose(logits[0, :3, :3] , _A , atol=1E-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , _A , atol=1E-4 )
    Path(_A ).mkdir(exist_ok=_A )
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(_A )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(_A )
    if push_to_hub:
        # map internal variant names to hub repo names
        lowerCAmelCase_ = {
            '''yolos_ti''': '''yolos-tiny''',
            '''yolos_s_200_pre''': '''yolos-small''',
            '''yolos_s_300_pre''': '''yolos-small-300''',
            '''yolos_s_dWr''': '''yolos-small-dwr''',
            '''yolos_base''': '''yolos-base''',
        }
        print('''Pushing to the hub...''' )
        lowerCAmelCase_ = model_mapping[yolos_name]
        image_processor.push_to_hub(_A , organization='''hustvl''' )
        model.push_to_hub(_A , organization='''hustvl''' )
if __name__ == "__main__":
    # CLI entry point for the YOLOS conversion script.
    # NOTE(review): the parser and parsed args are both bound to the scrambled
    # name `_A`, while the code references `parser`, `args` and
    # `convert_yolos_checkpoint` (which in this file is scrambled to
    # `__UpperCamelCase`) — none of these are defined as written.
    _A = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--yolos_name''',
        default='''yolos_s_200_pre''',
        type=str,
        help=(
            '''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
            ''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
        ),
    )
    parser.add_argument(
        '''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
    )
    _A = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 278 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
"""facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class UpperCamelCase__ ( __UpperCAmelCase ):
    """Configuration class for the ``data2vec-text`` model (RoBERTa-style).

    NOTE(review): the original declared every ``__init__`` parameter with the
    same scrambled name (a SyntaxError) and bound each value to a throwaway
    local instead of an instance attribute; the conventional parameter names
    and ``self.*`` assignments are restored from the body's references.
    """

    # NOTE(review): looks like a scrambled `model_type` attribute — TODO confirm
    __snake_case : List[Any] = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        """Store the model hyperparameters and forward token ids to the base class."""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class UpperCamelCase__ ( __UpperCAmelCase ):
    '''ONNX export configuration: declares the model's dynamic input axes.'''

    @property
    def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> str:
        '''Return an ordered mapping of input names to their dynamic axes.

        Multiple-choice adds a "choice" axis between batch and sequence.
        '''
        axes = (
            {0: """batch""", 1: """choice""", 2: """sequence"""}
            if self.task == "multiple-choice"
            else {0: """batch""", 1: """sequence"""}
        )
        return OrderedDict((feature, axes) for feature in ("""input_ids""", """attention_mask"""))
| 296 |
def __UpperCamelCase ( numbers ):
    """Return the maximum product over all contiguous subarrays of `numbers`.

    Uses the classic running max/min trick: a negative element swaps the roles
    of the current maximum and minimum products.

    Args:
        numbers: list or tuple of integers; empty input yields 0.

    Returns:
        int: the largest contiguous-subarray product.

    Raises:
        ValueError: if `numbers` is not a list/tuple of integers.

    NOTE(review): the original referenced the undefined name ``numbers`` (the
    parameter was ``_A``) and collapsed the three accumulators into one
    throwaway local; names restored.
    """
    if not numbers:
        return 0
    if not isinstance(numbers , (list, tuple) ) or not all(
        isinstance(number , int ) for number in numbers ):
        raise ValueError('''numbers must be an iterable of integers''' )
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1 , len(numbers ) ):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            # a negative factor turns the smallest product into the largest
            max_till_now , min_till_now = min_till_now, max_till_now
        max_till_now = max(number , max_till_now * number )
        min_till_now = min(number , min_till_now * number )
        # update the maximum product found till now
        max_prod = max(max_prod , max_till_now )
    return max_prod
| 278 | 0 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
# Mode flags for the conversion below (config-only / renaming / weights).
# NOTE(review): all three are bound to the same scrambled name `_lowercase`,
# while the code below reads `do_only_config`, `do_only_renaming` and
# `do_only_weights` — none of which are defined as written. The same
# scrambling affects every assignment in this script (`parser`, `args`,
# `config`, `model`, `state_dict`, ... are all referenced but never bound).
_lowercase : List[str] = False
_lowercase : Tuple = True
_lowercase : Any = False
if __name__ == "__main__":
    _lowercase : Union[str, Any] = argparse.ArgumentParser()
    parser.add_argument(
        '--repo_path',
        default=None,
        type=str,
        required=True,
        help='The config json file corresponding to the architecture.',
    )
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    _lowercase : Optional[Any] = parser.parse_args()
    # old-config-key -> new-config-key renames
    _lowercase : List[Any] = {
        'image_size': 'sample_size',
        'num_res_blocks': 'layers_per_block',
        'block_channels': 'block_out_channels',
        'down_blocks': 'down_block_types',
        'up_blocks': 'up_block_types',
        'downscale_freq_shift': 'freq_shift',
        'resnet_num_groups': 'norm_num_groups',
        'resnet_act_fn': 'act_fn',
        'resnet_eps': 'norm_eps',
        'num_head_channels': 'attention_head_dim',
    }
    # old-weight-prefix -> new-weight-prefix renames
    _lowercase : Optional[Any] = {
        'time_steps': 'time_proj',
        'mid': 'mid_block',
        'downsample_blocks': 'down_blocks',
        'upsample_blocks': 'up_blocks',
    }
    _lowercase : str = '' if has_file(args.repo_path, 'config.json') else 'unet'
    with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader:
        _lowercase : Union[str, Any] = reader.read()
        _lowercase : Dict = json.loads(text)
    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)
    if has_file(args.repo_path, 'config.json'):
        _lowercase : Optional[Any] = UNetaDModel(**config)
    else:
        _lowercase : Tuple = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel
        _lowercase : Any = class_name(**config)
    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))
    _lowercase : Dict = dict(model.config)
    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                _lowercase : Optional[Any] = config[key]
                del config[key]
        _lowercase : Optional[Any] = [k.replace('UNetRes', '') for k in config['down_block_types']]
        _lowercase : Union[str, Any] = [k.replace('UNetRes', '') for k in config['up_block_types']]
    if do_only_weights:
        _lowercase : int = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin'))
        _lowercase : List[Any] = {}
        for param_key, param_value in state_dict.items():
            # skip downsample/upsample op parameters
            if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'):
                continue
            _lowercase : Tuple = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split('.')[0] == key:
                    _lowercase : Dict = param_value
                    _lowercase : Union[str, Any] = True
            if not has_changed:
                _lowercase : int = param_value
        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 332 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __UpperCamelCase ( model_name ):
    """Build a FocalNetConfig for the named checkpoint variant.

    Architecture choices (depths, focal levels/windows, embedding dim,
    conv embedding, post-layernorm, layerscale) are derived from substrings
    of ``model_name``; large/huge models use the ImageNet-22k label set.

    NOTE(review): the original bound every value to a throwaway local and the
    id2label comprehension called ``int`` on the function argument instead of
    the key; descriptive names and the config construction are restored.
    """
    depths = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
    use_conv_embed = True if '''large''' in model_name or '''huge''' in model_name else False
    use_post_layernorm = True if '''large''' in model_name or '''huge''' in model_name else False
    use_layerscale = True if '''large''' in model_name or '''huge''' in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = '''huggingface/label-files'''
    if "large" in model_name or "huge" in model_name:
        filename = '''imagenet-22k-id2label.json'''
    else:
        filename = '''imagenet-1k-id2label.json'''
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim , depths=depths , focal_levels=focal_levels , focal_windows=focal_windows , use_conv_embed=use_conv_embed , id2label=idalabel , label2id=labelaid , use_post_layernorm=use_post_layernorm , use_layerscale=use_layerscale , )
    return config
def __UpperCamelCase ( name ):
    """Translate a FocalNet checkpoint key into the HF Transformers name.

    Rules apply in order to the running value of ``name``; anything that is
    not a classifier head key ends up prefixed with ``focalnet.``.

    NOTE(review): the original referenced ``name`` while the parameter was
    ``_A`` and dropped every ``replace`` result into a throwaway local; the
    accumulating rewrites are restored.
    """
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
    if "layers" in name:
        name = '''encoder.''' + name
    if "encoder.layers" in name:
        name = name.replace('''encoder.layers''' , '''encoder.stages''' )
    if "downsample.proj" in name:
        name = name.replace('''downsample.proj''' , '''downsample.projection''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''layers''' )
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace('''modulation.f''' , '''modulation.projection_in''' )
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace('''modulation.h''' , '''modulation.projection_context''' )
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
    if name == "norm.weight":
        name = '''layernorm.weight'''
    if name == "norm.bias":
        name = '''layernorm.bias'''
    if "head" in name:
        name = name.replace('''head''' , '''classifier''' )
    else:
        name = '''focalnet.''' + name
    return name
def __UpperCamelCase ( _A , _A , _A=False ):
    """Download, convert, verify and optionally save/push a FocalNet checkpoint.

    NOTE(review): scrambled — the signature declares ``_A`` three times (a
    SyntaxError), every result is bound to the throwaway ``lowerCAmelCase_``,
    and the body references undefined names (``model_name``, ``state_dict``,
    ``key``, ``val``, ``model``, ``processor``, ``inputs``, ``image_transforms``,
    ``outputs``, ``predicted_class_idx``, ``pytorch_dump_folder_path``,
    ``push_to_hub``). Helper calls also collide with the shared scrambled name
    ``__UpperCamelCase``. Left byte-identical; restore names before use.
    """
    # fmt: off
    # mapping from model variant name to the original checkpoint URL
    lowerCAmelCase_ = {
        '''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
        '''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
        '''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
        '''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
        '''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
        '''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
        '''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
        '''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
        '''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
        '''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
    }
    # fmt: on
    lowerCAmelCase_ = model_name_to_url[model_name]
    print('''Checkpoint URL: ''' , _A )
    lowerCAmelCase_ = torch.hub.load_state_dict_from_url(_A , map_location='''cpu''' )['''model''']
    # rename keys
    for key in state_dict.copy().keys():
        lowerCAmelCase_ = state_dict.pop(_A )
        lowerCAmelCase_ = val
    lowerCAmelCase_ = get_focalnet_config(_A )
    lowerCAmelCase_ = FocalNetForImageClassification(_A )
    model.eval()
    # load state dict
    model.load_state_dict(_A )
    # verify conversion
    lowerCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    lowerCAmelCase_ = BitImageProcessor(
        do_resize=_A , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=_A , crop_size=224 , do_normalize=_A , image_mean=_A , image_std=_A , )
    lowerCAmelCase_ = Image.open(requests.get(_A , stream=_A ).raw )
    lowerCAmelCase_ = processor(images=_A , return_tensors='''pt''' )
    # reference torchvision pipeline used to validate the processor's output
    lowerCAmelCase_ = transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
        ] )
    lowerCAmelCase_ = image_transforms(_A ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , _A , atol=1E-4 )
    lowerCAmelCase_ = model(**_A )
    lowerCAmelCase_ = outputs.logits.argmax(-1 ).item()
    print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
    print('''First values of logits:''' , outputs.logits[0, :3] )
    # per-variant expected logits used as conversion checksums
    if model_name == "focalnet-tiny":
        lowerCAmelCase_ = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
    elif model_name == "focalnet-tiny-lrf":
        lowerCAmelCase_ = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
    elif model_name == "focalnet-small":
        lowerCAmelCase_ = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
    elif model_name == "focalnet-small-lrf":
        lowerCAmelCase_ = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
    elif model_name == "focalnet-base":
        lowerCAmelCase_ = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
    elif model_name == "focalnet-base-lrf":
        lowerCAmelCase_ = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
    assert torch.allclose(outputs.logits[0, :3] , _A , atol=1E-4 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(_A )
        processor.save_pretrained(_A )
    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub..." )
        model.push_to_hub(f"{model_name}" )
        processor.push_to_hub(f"{model_name}" )
if __name__ == "__main__":
    # CLI entry point for the FocalNet conversion script.
    # NOTE(review): parser and parsed args are both bound to `_A`, while the
    # code references `parser`, `args` and `convert_focalnet_checkpoint`
    # (scrambled to `__UpperCamelCase` in this file) — undefined as written.
    _A = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''focalnet-tiny''',
        type=str,
        help='''Name of the FocalNet model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''',
        action='''store_true''',
        help='''Whether to push the model and processor to the hub.''',
    )
    _A = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 278 | 0 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
lowercase__ : Tuple = ["""gpt2"""]
lowercase__ : Optional[Any] = """gpt2"""
if is_tf_available():
    # NOTE(review): as written the class below is NOT indented under this
    # guard, leaving the `if` with an empty body — presumably an indentation
    # loss during extraction; confirm against the original test file.
class UpperCamelCase__ ( tf.Module ):
    """Wraps a tokenizer + LM head model so both can be exported as one
    tf.Module with a string-input serving signature.

    NOTE(review): scrambled — the __init__ results are bound to local
    throwaway names instead of `self.tokenizer` / `self.model`, and
    `UpperCamelCase__` / `tf.intaa` are undefined references (the latter
    presumably `tf.int32`).
    """
    def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : str ):
        super().__init__()
        lowerCAmelCase_ : Union[str, Any] = tokenizer
        lowerCAmelCase_ : Tuple = AutoConfig.from_pretrained(UpperCamelCase__ )
        lowerCAmelCase_ : Tuple = TFGPTaLMHeadModel.from_config(UpperCamelCase__ )

    # serving signature: a batch of raw strings in, logits out
    @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text' ),) )
    def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : List[Any] ):
        lowerCAmelCase_ : Optional[int] = self.tokenizer(UpperCamelCase__ )
        lowerCAmelCase_ : List[str] = tokenized['input_ids'].to_tensor()
        lowerCAmelCase_ : Optional[int] = tf.cast(input_ids_dense > 0 , tf.intaa )
        # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
        lowerCAmelCase_ : int = self.model(input_ids=UpperCamelCase__ , attention_mask=UpperCamelCase__ )['logits']
        return outputs
@require_tf
@require_keras_nlp
class UpperCamelCase__ ( unittest.TestCase ):
    """Tests that the in-graph TFGPT2Tokenizer matches the Python tokenizer
    and survives tf.function compilation, SavedModel round-trips, config
    round-trips and max_length truncation.

    NOTE(review): scrambled throughout — loop/tokenizer arguments appear as
    the undefined name `UpperCamelCase__`; the intent of each call is noted
    but the code cannot run as written.
    """
    def SCREAMING_SNAKE_CASE__ ( self : Dict ):
        # build paired Python/TF tokenizers and a set of tricky test strings
        super().setUp()
        lowerCAmelCase_ : str = [GPTaTokenizer.from_pretrained(UpperCamelCase__ ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        lowerCAmelCase_ : Dict = [TFGPTaTokenizer.from_pretrained(UpperCamelCase__ ) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )
        lowerCAmelCase_ : int = [
            'This is a straightforward English test sentence.',
            'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
            'Now we\'re going to add some Chinese: 一 二 三 一二三',
            'And some much more rare Chinese: 齉 堃 齉堃',
            'Je vais aussi écrire en français pour tester les accents',
            'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
        ]
        lowerCAmelCase_ : Optional[int] = list(zip(self.test_sentences , self.test_sentences[::-1] ) )

    def SCREAMING_SNAKE_CASE__ ( self : int ):
        # TF tokenizer output must equal the Python tokenizer output per key
        for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
            for test_inputs in self.test_sentences:
                lowerCAmelCase_ : str = tokenizer([test_inputs] , return_tensors='tf' )
                lowerCAmelCase_ : Tuple = tf_tokenizer([test_inputs] )
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    lowerCAmelCase_ : Any = python_outputs[key].numpy()
                    lowerCAmelCase_ : List[str] = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(UpperCamelCase__ , tf.intaa ) == tf_outputs_values ) )

    @slow
    def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
        # graph-compiled tokenizer must match eager execution
        for tf_tokenizer in self.tf_tokenizers:
            lowerCAmelCase_ : str = tf.function(UpperCamelCase__ )
            for test_inputs in self.test_sentences:
                lowerCAmelCase_ : str = tf.constant(UpperCamelCase__ )
                lowerCAmelCase_ : Any = compiled_tokenizer(UpperCamelCase__ )
                lowerCAmelCase_ : List[Any] = tf_tokenizer(UpperCamelCase__ )
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )

    @slow
    def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
        # SavedModel export + reload must preserve the serving output
        for tf_tokenizer in self.tf_tokenizers:
            lowerCAmelCase_ : Union[str, Any] = ModelToSave(tokenizer=UpperCamelCase__ )
            lowerCAmelCase_ : Tuple = tf.convert_to_tensor([self.test_sentences[0]] )
            lowerCAmelCase_ : int = model.serving(UpperCamelCase__ )  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                lowerCAmelCase_ : Union[str, Any] = Path(UpperCamelCase__ ) / 'saved.model'
                tf.saved_model.save(UpperCamelCase__ , UpperCamelCase__ , signatures={'serving_default': model.serving} )
                lowerCAmelCase_ : Any = tf.saved_model.load(UpperCamelCase__ )
                lowerCAmelCase_ : int = loaded_model.signatures['serving_default'](UpperCamelCase__ )['output_0']
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output ) )

    @slow
    def SCREAMING_SNAKE_CASE__ ( self : Any ):
        # get_config / from_config round-trip must preserve behaviour
        for tf_tokenizer in self.tf_tokenizers:
            lowerCAmelCase_ : str = tf.convert_to_tensor([self.test_sentences[0]] )
            lowerCAmelCase_ : Optional[Any] = tf_tokenizer(UpperCamelCase__ )  # Build model with some sample inputs
            lowerCAmelCase_ : Optional[Any] = tf_tokenizer.get_config()
            lowerCAmelCase_ : int = TFGPTaTokenizer.from_config(UpperCamelCase__ )
            lowerCAmelCase_ : Optional[int] = model_from_config(UpperCamelCase__ )
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )

    @slow
    def SCREAMING_SNAKE_CASE__ ( self : Any ):
        # max_length must cap the produced input_ids length
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            lowerCAmelCase_ : int = 1_2_3_1_2_3
            for max_length in [3, 5, 1_0_2_4]:
                lowerCAmelCase_ : List[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
                lowerCAmelCase_ : str = tf_tokenizer(UpperCamelCase__ , max_length=UpperCamelCase__ )
                lowerCAmelCase_ : Union[str, Any] = out['input_ids'].numpy().shape[1]
                assert out_length == max_length
| 224 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def __UpperCamelCase ( state_dict ):
    """Delete fairseq-only bookkeeping keys from `state_dict`, in place.

    Missing keys are ignored (``pop`` with a default), so this is safe to run
    on checkpoints that lack some of the entries.

    NOTE(review): the original referenced the undefined name ``state_dict``
    (the parameter was ``_A``) and popped ``(_A, _A)`` instead of each key.
    """
    ignore_keys = [
        '''decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def __UpperCamelCase ( emb ):
    """Create a bias-free Linear layer that shares the embedding's weight tensor.

    Used to tie the LM output projection to the input embeddings.

    NOTE(review): the original unpacked the shape into one throwaway local and
    built ``nn.Linear(_A, _A, bias=_A)``; the two sizes and ``bias=False``
    are restored from the surrounding references.
    """
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def __UpperCamelCase ( _A ):
    """Load a fairseq XGLM checkpoint from disk and return an XGLMForCausalLM.

    NOTE(review): scrambled — every result is bound to the throwaway
    ``lowerCAmelCase_`` while the body references undefined names
    (``checkpoint``, ``state_dict``, ``args``, ``model``) and calls helpers
    (``remove_ignore_keys_``, ``make_linear_from_emb``) that share the
    scrambled name ``__UpperCamelCase`` in this file. Left byte-identical;
    restore names before use.
    """
    lowerCAmelCase_ = torch.load(_A , map_location='''cpu''' )
    lowerCAmelCase_ = Namespace(**checkpoint['''cfg''']['''model'''] )
    lowerCAmelCase_ = checkpoint['''model''']
    remove_ignore_keys_(_A )
    lowerCAmelCase_ = state_dict['''decoder.embed_tokens.weight'''].shape[0]
    # fairseq uses a "decoder." prefix; HF expects "model."
    lowerCAmelCase_ = {key.replace('''decoder''' , '''model''' ): val for key, val in state_dict.items()}
    lowerCAmelCase_ = XGLMConfig(
        vocab_size=_A , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''gelu''' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
    lowerCAmelCase_ = XGLMForCausalLM(_A )
    lowerCAmelCase_ = model.load_state_dict(_A , strict=_A )
    print(_A )
    # tie the output projection to the input embeddings
    lowerCAmelCase_ = make_linear_from_emb(model.model.embed_tokens )
    return model
if __name__ == "__main__":
    # CLI entry point for the XGLM conversion script.
    # NOTE(review): parser/args/model are all bound to `_A`, while the code
    # references `parser`, `args`, `model` and the descriptive function name
    # `convert_fairseq_xglm_checkpoint_from_disk` (scrambled to
    # `__UpperCamelCase` in this file) — undefined as written.
    _A = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
    parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    _A = parser.parse_args()
    _A = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 278 | 0 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE( digit ) -> int:
    """Recursively compute ``digit!`` (0! and 1! are both 1).

    NOTE(review): the original's parameter was ``__lowercase`` while the body
    used ``digit`` and recursed via the undefined name ``factorial``; the
    parameter name and self-recursion are restored.
    """
    return 1 if digit in (0, 1) else (digit * SCREAMING_SNAKE_CASE(digit - 1 ))
def SCREAMING_SNAKE_CASE( number ) -> bool:
    """Return True if `number` equals the sum of the factorials of its digits.

    Such values (1, 2, 145, 40585, ...) are Krishnamurthy / strong numbers.

    NOTE(review): the original's parameter was ``__lowercase`` while the body
    used ``number`` and the divmod results were collapsed into one local;
    the digit factorial comes from the standard library.
    """
    from math import factorial

    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        # peel off the least-significant digit
        duplicate , digit = divmod(duplicate , 1_0 )
        fact_sum += factorial(digit )
    return fact_sum == number
if __name__ == "__main__":
    # Interactive driver: read a number and report whether it is a
    # Krishnamurthy number.
    # NOTE(review): the input is bound to `UpperCamelCase` but the f-string
    # references `number` and calls `krishnamurthy` (scrambled to
    # `SCREAMING_SNAKE_CASE` in this file) — undefined as written.
    print('''Program to check whether a number is a Krisnamurthy Number or not.''')
    UpperCamelCase = int(input('''Enter number: ''').strip())
    print(
        f'{number} is {"" if krishnamurthy(number) else "not "}a Krishnamurthy Number.'
    )
| 319 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
_A = '''tiny-wmt19-en-ru'''
# Build
# NOTE(review): throughout this script every assignment target was mangled to
# `_A`, while later lines read the originally intended names (`vocab`,
# `vocab_tokens`, `merges`, `build_dir`, `src_vocab_file`, `tokenizer`,
# `config`, `tiny_model`, `batch`, `outputs`, `mname_tiny`, ...). As written
# the script raises NameError at the first such read; the original bindings
# must be restored before it can run. Comments below document the intent.
# borrowed from a test
_A = [
    '''l''',
    '''o''',
    '''w''',
    '''e''',
    '''r''',
    '''s''',
    '''t''',
    '''i''',
    '''d''',
    '''n''',
    '''w</w>''',
    '''r</w>''',
    '''t</w>''',
    '''lo''',
    '''low''',
    '''er</w>''',
    '''low</w>''',
    '''lowest</w>''',
    '''newer</w>''',
    '''wider</w>''',
    '''<unk>''',
]
# token -> id table, shared by the source and target vocab files below
_A = dict(zip(vocab, range(len(vocab))))
_A = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
    # Write the tiny vocab/merges files into a throwaway dir, then build the
    # tokenizer from them.
    _A = Path(tmpdirname)
    _A = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
    _A = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
    _A = build_dir / VOCAB_FILES_NAMES['''merges_file''']
    with open(src_vocab_file, '''w''') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, '''w''') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, '''w''') as fp:
        fp.write('''\n'''.join(merges))
    _A = FSMTTokenizer(
        langs=['''en''', '''ru'''],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )
# NOTE(review): the config's vocab sizes (1_000) do not match the tokenizer's
# (len(vocab)), and its lang order ['ru', 'en'] is the reverse of the
# tokenizer's ['en', 'ru'] — confirm against the original generator script.
_A = FSMTConfig(
    langs=['''ru''', '''en'''],
    src_vocab_size=1_000,
    tgt_vocab_size=1_000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)
_A = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")
# Test
_A = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
_A = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 278 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase (__UpperCAmelCase ,unittest.TestCase ):
    """Tokenizer test suite for CLIP, exercising both the slow
    ``CLIPTokenizer`` and the fast ``CLIPTokenizerFast`` through the shared
    tokenizer-tester mixin (the first base class).

    NOTE(review): many assignments in the methods below bind values to
    ``lowercase__`` while later lines read the originally intended names
    (``vocab_tokens``, ``tokenizer_s``/``tokenizer_r``, ``text``,
    ``text_of_1_token``, ``UpperCamelCase__`` in argument positions, ...).
    Those bindings look mangled by an automated rename and must be restored
    before the tests can run.
    """

    _UpperCAmelCase :str = CLIPTokenizer
    _UpperCAmelCase :Union[str, Any] = CLIPTokenizerFast
    _UpperCAmelCase :Dict = True
    _UpperCAmelCase :Union[str, Any] = {}
    _UpperCAmelCase :Optional[int] = False

    def _snake_case ( self ):
        # Write a tiny BPE vocab + merges file pair into the temp dir the
        # tokenizers under test will be loaded from.
        super().setUp()

        # fmt: off
        lowercase__: int = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
        # fmt: on
        lowercase__: str = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
        lowercase__: List[Any] = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''']
        lowercase__: Any = {'''unk_token''': '''<unk>'''}

        lowercase__: Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        lowercase__: Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(UpperCamelCase__ ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(UpperCamelCase__ ) )

    def _snake_case ( self , **_UpperCAmelCase ):
        # Factory for the slow tokenizer, loading from the temp dir above.
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )

    def _snake_case ( self , **_UpperCAmelCase ):
        # Factory for the fast (Rust-backed) tokenizer.
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )

    def _snake_case ( self , _UpperCAmelCase ):
        # Input/expected-output pair used by the mixin's generic round-trip tests.
        lowercase__: Tuple = '''lower newer'''
        lowercase__: str = '''lower newer'''
        return input_text, output_text

    def _snake_case ( self ):
        # Full tokenization check against the hand-built tiny vocab.
        lowercase__: Optional[int] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        lowercase__: Tuple = '''lower newer'''
        lowercase__: List[str] = ['''lo''', '''w''', '''er</w>''', '''n''', '''e''', '''w''', '''er</w>''']
        lowercase__: List[str] = tokenizer.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )

        lowercase__: List[str] = tokens + [tokenizer.unk_token]
        lowercase__: Optional[int] = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )

    @require_ftfy
    def _snake_case ( self ):
        # Slow (ftfy-based) and fast tokenizers must agree on tricky unicode.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                lowercase__: str = self.tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
                lowercase__: Union[str, Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )

                lowercase__: Tuple = '''A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'''
                lowercase__: str = tokenizer_s.tokenize(UpperCamelCase__ )
                lowercase__: Tuple = tokenizer_r.tokenize(UpperCamelCase__ )
                self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                lowercase__: List[Any] = '''xa\u0303y''' + ''' ''' + '''x\xe3y'''
                lowercase__: Tuple = tokenizer_s.tokenize(UpperCamelCase__ )
                lowercase__: Any = tokenizer_r.tokenize(UpperCamelCase__ )
                self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )

                # Test that the tokenization is identical on unicode of space type
                lowercase__: str = [
                    '''\u0009''', # (horizontal tab, '\t')
                    '''\u000B''', # (vertical tab)
                    '''\u000C''', # (form feed)
                    '''\u0020''', # (space, ' ')
                    '''\u200E''', # (left-to-right mark):w
                    '''\u200F''', # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    lowercase__: List[str] = tokenizer_s.tokenize(UpperCamelCase__ )
                    lowercase__: Optional[Any] = tokenizer_r.tokenize(UpperCamelCase__ )
                    self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )

                # Test that the tokenization is identical on unicode of line break type
                lowercase__: List[Any] = [
                    '''\u000A''', # (line feed, '\n')
                    '''\r\n''', # (carriage return and line feed, '\r\n')
                    '''\u000D''', # (carriage return, '\r')
                    '''\r''', # (carriage return, '\r')
                    '''\u000D''', # (carriage return, '\r')
                    '''\u2028''', # (line separator)
                    '''\u2029''', # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    lowercase__: Any = tokenizer_s.tokenize(UpperCamelCase__ )
                    lowercase__: Optional[Any] = tokenizer_r.tokenize(UpperCamelCase__ )
                    self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )

    def _snake_case ( self ):
        # Offset mappings must account for CLIP's leading-space handling.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                lowercase__: List[str] = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
                lowercase__: Union[str, Any] = F"""{text_of_1_token} {text_of_1_token}"""

                lowercase__: List[str] = self.rust_tokenizer_class.from_pretrained(
                    UpperCamelCase__ , use_fast=UpperCamelCase__ , )
                lowercase__: Tuple = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(UpperCamelCase__ ) + 1, len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )

                lowercase__: Tuple = F""" {text}"""

                lowercase__: Any = self.rust_tokenizer_class.from_pretrained(
                    UpperCamelCase__ , use_fast=UpperCamelCase__ , )
                lowercase__: Any = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCamelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(UpperCamelCase__ ) + 1, 1 + len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )

    def _snake_case ( self ):
        # Loading a legacy/incompatible serialized tokenizer must raise.
        with self.assertRaises(UpperCamelCase__ ) as context:
            self.rust_tokenizer_class.from_pretrained('''robot-test/old-clip-tokenizer''' )

        self.assertTrue(
            context.exception.args[0].startswith(
                '''The `backend_tokenizer` provided does not match the expected format.''' ) )

    @require_ftfy
    def _snake_case ( self ):
        # Re-run the mixin's python/rust equivalence check under ftfy.
        super().test_tokenization_python_rust_equals()

    def _snake_case ( self ):
        # Intentionally skipped for CLIP (no slow-from-fast conversion test).
        pass
| 177 |
import argparse
from collections import defaultdict
import yaml
_A = '''docs/source/en/_toctree.yml'''
def __UpperCamelCase ( model_doc ):
    """Return ``model_doc`` with duplicate ``local`` entries collapsed and the
    whole list sorted case-insensitively by ``title``.

    Each entry is a dict with at least ``local`` and ``title`` keys. Entries
    sharing a ``local`` value must all carry the same title, otherwise a
    ValueError is raised.

    Fixes vs. the previous revision:
      * the occurrence counter must default to 0 — ``defaultdict(int)`` — it
        was seeded with the function argument;
      * the sort-key lambda's parameter name did not match the name used in
        its body (NameError at sort time);
      * the parameter itself was misnamed, leaving ``model_doc`` undefined.
    """
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
def __UpperCamelCase ( overwrite=False ):
    """Check that the "Models" part of the doc table of content is sorted.

    Reads the ToC YAML at the module-level path ``_A``, de-duplicates and
    sorts each modality's model list, then either writes the fixed file back
    (``overwrite=True``) or raises ValueError if any difference was found.

    Fixes vs. the previous revision:
      * the parameter shadowed the module-level ToC path ``_A``, so the YAML
        was "opened" from the boolean flag;
      * each cleaned section was bound to a throwaway local instead of being
        written back into the parsed tree, so ``overwrite`` never changed the
        content that was dumped;
      * ``yaml.dump`` was called with the wrong arguments.
    """
    with open(_A, encoding='''utf-8''' ) as f:
        content = yaml.safe_load(f.read() )

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['''sections''']

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['''sections''']

    # Modality subsections (text, vision, audio, ...) each hold a model list.
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if '''sections''' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['''sections''']
        # NOTE(review): `clean_model_doc_toc` is the sorter defined earlier in
        # this file; its def statement appears to have been renamed by the
        # same mangling — verify the name resolves before running.
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc['''sections'''] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]['''sections'''] = model_doc
            content[api_idx]['''sections'''] = api_doc
            with open(_A, '''w''', encoding='''utf-8''' ) as f:
                f.write(yaml.dump(content, allow_unicode=True ) )
        else:
            raise ValueError(
                '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
    # CLI wrapper: `--fix_and_overwrite` rewrites the ToC file in place.
    # NOTE(review): the parser/args values are bound to `_A` while the next
    # lines read `parser`/`args`, and `check_model_doc` appears to have been
    # renamed — restore those names before running.
    _A = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    _A = parser.parse_args()
    check_model_doc(args.fix_and_overwrite)
| 278 | 0 |
import math
def __SCREAMING_SNAKE_CASE ( num: int ) -> bool:
    """Return True if ``num`` is a perfect square.

    Fixes vs. the previous revision:
      * the parameter was misnamed, leaving ``num`` undefined in the body;
      * the float ``math.sqrt`` round-trip comparison is unreliable for large
        integers — use the exact integer square root instead.
    Negative inputs return False.
    """
    if num < 0:
        return False
    return math.isqrt(num) ** 2 == num
def __SCREAMING_SNAKE_CASE ( n: int ) -> bool:
    """Binary-search check for whether ``n`` is a perfect square.

    Fix vs. the previous revision: the parameter was misnamed, so the body's
    ``n`` was undefined. Negative inputs return False (empty search interval).
    """
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
    # Run any doctests defined in this module when executed directly.
    import doctest
    doctest.testmod()
| 219 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class A ( __UpperCAmelCase ):
    """UnCLIPScheduler test-suite built on the shared SchedulerCommonTest
    harness (the base class; `dummy_model`/`dummy_sample_deter` come from it).

    NOTE(review): throughout the methods below, values are bound to
    `lowerCAmelCase_` while subsequent lines read the originally intended
    names (`config`, `scheduler_class`, `scheduler`, `model`, `sample`,
    `generator`, `timesteps`, `residual`, `pred_prev_sample`, `prev_timestep`,
    `result_sum`, `result_mean`). The bindings look mangled by an automated
    rename and must be restored before these tests can run.
    """

    # Scheduler class(es) exercised by the common-test harness.
    __snake_case = (UnCLIPScheduler,)

    def SCREAMING_SNAKE_CASE__ ( self, **UpperCamelCase__ ):
        """Return the default UnCLIP scheduler config, overridable via kwargs."""
        lowerCAmelCase_ = {
            '''num_train_timesteps''': 1000,
            '''variance_type''': '''fixed_small_log''',
            '''clip_sample''': True,
            '''clip_sample_range''': 1.0,
            '''prediction_type''': '''epsilon''',
        }

        config.update(**UpperCamelCase__ )
        return config

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Config check across several training-timestep counts."""
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Config check for both supported variance types."""
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Config check with sample clipping on and off."""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Config check over several clip ranges."""
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Config check for both prediction types."""
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Forward check over (time_step, prev_timestep) pairs; a previous
        timestep at or after the current one is invalid and skipped."""
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=UpperCamelCase__, prev_timestep=UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Pin _get_variance values for the fixed_small_log variance type."""
        lowerCAmelCase_ = self.scheduler_classes[0]
        lowerCAmelCase_ = self.get_scheduler_config(variance_type='''fixed_small_log''' )
        lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ )

        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1E-5

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Pin _get_variance values for the learned_range variance type."""
        lowerCAmelCase_ = self.scheduler_classes[0]
        lowerCAmelCase_ = self.get_scheduler_config(variance_type='''learned_range''' )
        lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ )

        lowerCAmelCase_ = 0.5

        assert scheduler._get_variance(1, predicted_variance=UpperCamelCase__ ) - -10.1_712_790 < 1E-5
        assert scheduler._get_variance(487, predicted_variance=UpperCamelCase__ ) - -5.7_998_052 < 1E-5
        assert scheduler._get_variance(999, predicted_variance=UpperCamelCase__ ) - -0.0_010_011 < 1E-5

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Full denoising loop over all training timesteps; pins the summed
        and mean absolute values of the final sample."""
        lowerCAmelCase_ = self.scheduler_classes[0]
        lowerCAmelCase_ = self.get_scheduler_config()
        lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ )

        lowerCAmelCase_ = scheduler.timesteps

        lowerCAmelCase_ = self.dummy_model()
        lowerCAmelCase_ = self.dummy_sample_deter
        lowerCAmelCase_ = torch.manual_seed(0 )

        for i, t in enumerate(UpperCamelCase__ ):
            # 1. predict noise residual
            lowerCAmelCase_ = model(UpperCamelCase__, UpperCamelCase__ )

            # 2. predict previous mean of sample x_t-1
            lowerCAmelCase_ = scheduler.step(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, generator=UpperCamelCase__ ).prev_sample

            lowerCAmelCase_ = pred_prev_sample

        lowerCAmelCase_ = torch.sum(torch.abs(UpperCamelCase__ ) )
        lowerCAmelCase_ = torch.mean(torch.abs(UpperCamelCase__ ) )

        assert abs(result_sum.item() - 252.2_682_495 ) < 1E-2
        assert abs(result_mean.item() - 0.3_284_743 ) < 1E-3

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Same loop with a 25-step inference schedule, passing explicit
        prev_timestep values; pins the resulting statistics."""
        lowerCAmelCase_ = self.scheduler_classes[0]
        lowerCAmelCase_ = self.get_scheduler_config()
        lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ )
        scheduler.set_timesteps(25 )

        lowerCAmelCase_ = scheduler.timesteps

        lowerCAmelCase_ = self.dummy_model()
        lowerCAmelCase_ = self.dummy_sample_deter
        lowerCAmelCase_ = torch.manual_seed(0 )

        for i, t in enumerate(UpperCamelCase__ ):
            # 1. predict noise residual
            lowerCAmelCase_ = model(UpperCamelCase__, UpperCamelCase__ )

            if i + 1 == timesteps.shape[0]:
                lowerCAmelCase_ = None
            else:
                lowerCAmelCase_ = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            lowerCAmelCase_ = scheduler.step(
                UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, prev_timestep=UpperCamelCase__, generator=UpperCamelCase__ ).prev_sample

            lowerCAmelCase_ = pred_prev_sample

        lowerCAmelCase_ = torch.sum(torch.abs(UpperCamelCase__ ) )
        lowerCAmelCase_ = torch.mean(torch.abs(UpperCamelCase__ ) )

        assert abs(result_sum.item() - 258.2_044_983 ) < 1E-2
        assert abs(result_mean.item() - 0.3_362_038 ) < 1E-3

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Not applicable to UnCLIP; intentionally skipped."""
        pass

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Not applicable to UnCLIP; intentionally skipped."""
        pass
| 278 | 0 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def a_ ( frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ):
    """Create a 2nd-order IIR low-pass filter (RBJ audio-EQ cookbook).

    Fixes vs. the previous revision: the three parameters all shared one name
    (a SyntaxError) and every local was collapsed onto a single name, leaving
    the coefficient names ``aa``/``ba`` undefined at the set_coefficients call.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def a_ ( frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ):
    """Create a 2nd-order IIR high-pass filter (RBJ audio-EQ cookbook).

    Fixes vs. the previous revision: duplicate parameter names (SyntaxError)
    and all locals collapsed onto one name, leaving ``aa``/``ba`` undefined.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def a_ ( frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ):
    """Create a 2nd-order IIR band-pass filter (RBJ audio-EQ cookbook).

    Fixes vs. the previous revision: duplicate parameter names (SyntaxError)
    and all locals collapsed onto one name, leaving ``aa``/``ba`` undefined.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def a_ ( frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ):
    """Create a 2nd-order IIR all-pass filter (RBJ audio-EQ cookbook).

    The a- and b-coefficient lists are mirror images of each other, which is
    what makes the magnitude response flat.

    Fixes vs. the previous revision: duplicate parameter names (SyntaxError)
    and all locals collapsed onto one name, leaving ``ba`` undefined.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def a_ ( frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) , ):
    """Create a 2nd-order peaking-EQ filter (RBJ audio-EQ cookbook) that
    boosts/cuts ``gain_db`` decibels around ``frequency``.

    Fixes vs. the previous revision: the four parameters all shared one name
    (SyntaxError) and all locals were collapsed onto one name, leaving the
    coefficient names ``aa``/``ba`` undefined.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def a_ ( frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) , ):
    """Create a 2nd-order low-shelf filter (RBJ audio-EQ cookbook) boosting or
    cutting frequencies below ``frequency`` by ``gain_db`` decibels.

    Fixes vs. the previous revision: the four parameters all shared one name
    (SyntaxError) and all locals were collapsed onto one name, leaving the
    coefficient names ``aa``/``ba`` undefined.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    # Shared sub-expressions of the cookbook shelf formulas.
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def a_ ( frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) , ):
    """Create a 2nd-order high-shelf filter (RBJ audio-EQ cookbook) boosting
    or cutting frequencies above ``frequency`` by ``gain_db`` decibels.

    Fixes vs. the previous revision: the four parameters all shared one name
    (SyntaxError) and all locals were collapsed onto one name, leaving the
    coefficient names ``aa``/``ba`` undefined.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    # Shared sub-expressions of the cookbook shelf formulas.
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
| 4 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
_A = torch.device('''cpu''')
def __UpperCamelCase ( ):
    """Download and return the standard COCO validation image used as a
    conversion sanity-check input.

    Fixes vs. the previous revision: the URL was never passed to
    ``requests.get`` and the returned name ``im`` was undefined.
    """
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def __UpperCamelCase ( swiftformer_name ):
    """Return the first five expected logits for each released SwiftFormer
    checkpoint (used as a conversion sanity check).

    Returns None for an unknown name, preserving the original fall-through
    behaviour. Fix vs. the previous revision: the parameter was misnamed, so
    ``swiftformer_name`` was undefined in the body.
    """
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])

    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])

    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])

    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def __UpperCamelCase ( dct , old_key , new_key ):
    """Move ``dct[old_key]`` to ``dct[new_key]`` in place.

    Fix vs. the previous revision: the three parameters shared one name,
    which is a SyntaxError in Python.
    """
    val = dct.pop(old_key)
    dct[new_key] = val
def __UpperCamelCase ( state_dict ):
    """Build (old_name, new_name) pairs mapping original SwiftFormer state
    dict keys onto the Hugging Face model's naming scheme.

    Fixes vs. the previous revision: the parameter was misnamed (the body read
    undefined ``state_dict``) and the accumulator list was bound to a
    throwaway local, leaving ``rename_keys`` undefined.
    """
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('''.pwconv''' , '''.point_wise_conv''' )
        if ".dwconv" in k:
            k_new = k_new.replace('''.dwconv''' , '''.depth_wise_conv''' )
        if ".Proj." in k:
            k_new = k_new.replace('''.Proj.''' , '''.proj.''' )
        if "patch_embed" in k_new:
            k_new = k_new.replace('''patch_embed''' , '''swiftformer.patch_embed.patch_embedding''' )
        if "network" in k_new:
            ls = k_new.split('''.''' )
            if ls[2].isdigit():
                # "network.<stage>.<block>....": insert the ".blocks." level.
                k_new = '''swiftformer.encoder.network.''' + ls[1] + '''.blocks.''' + ls[2] + '''.''' + '''.'''.join(ls[3:] )
            else:
                k_new = k_new.replace('''network''' , '''swiftformer.encoder.network''' )
        rename_keys.append((k, k_new) )
    return rename_keys
@torch.no_grad()
def __UpperCamelCase ( swiftformer_name , pytorch_dump_folder_path , original_ckpt ):
    """Convert an original SwiftFormer checkpoint to a Hugging Face model dir,
    validating the converted logits against known expected values.

    Fixes vs. the previous revision: the three parameters shared one name
    (SyntaxError), and most results (config fields, checkpoint, model,
    processor, logits) were bound to a throwaway local while later lines read
    the originally intended names.
    """
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset''' ), '''r''' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('''https''' ):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location='''cpu''', check_hash=True )
        else:
            checkpoint = torch.load(original_ckpt, map_location='''cpu''' )
    state_dict = checkpoint

    # NOTE(review): `create_rename_keys`/`rename_key`/`prepare_img`/
    # `get_expected_output` are the helpers defined earlier in this file;
    # their def statements appear to have been renamed by the same mangling —
    # verify the names resolve before running.
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained('''preprocessor_config''' )
    inputs = processor(images=image, return_tensors='''pt''' )

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs['''pixel_values'''] ).logits

    assert hf_logits.shape == torch.Size([1, 1000] )
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3 )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True )
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}" )
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI wrapper: convert a named SwiftFormer checkpoint into a HF model dir.
    # NOTE(review): parser/args are bound to `_A` while later lines read
    # `parser`/`args`, and `convert_swiftformer_checkpoint` appears to have
    # been renamed — restore those names before running.
    _A = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--swiftformer_name''',
        default='''swiftformer_xs''',
        choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
        type=str,
        help='''Name of the SwiftFormer model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default='''./converted_outputs/''',
        type=str,
        help='''Path to the output PyTorch model directory.''',
    )
    parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')

    _A = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 278 | 0 |
"""simple docstring"""
def lowercase ( n : int ):
    """Return the number of divisors of ``n`` via prime factorisation: the
    product over prime factors of (multiplicity + 1).

    Fix vs. the previous revision: the parameter was named ``__snake_case``
    while the body read an undefined ``n``.
    """
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    # Any remaining factor > 1 is a single prime with multiplicity 1.
    if n > 1:
        n_divisors *= 2
    return n_divisors
def _count_divisors(value: int) -> int:
    """Number of divisors of ``value``, via prime factorisation (private
    helper — the module-level divisor counter is shadowed by the definition
    below, which shares its name)."""
    divisors = 1
    p = 2
    while p * p <= value:
        mult = 0
        while value % p == 0:
            value //= p
            mult += 1
        divisors *= mult + 1
        p += 1
    if value > 1:
        divisors *= 2
    return divisors


def lowercase ( threshold : int = 500 ):
    """Return the first triangular number with more than ``threshold``
    divisors (Project Euler problem 12).

    Fixes vs. the previous revision: the body called an undefined
    ``count_divisors`` and compared against an undefined ``_A``; the limit is
    now a backward-compatible parameter defaulting to the original
    hard-coded 500.
    """
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if _count_divisors(t_num) > threshold:
            break
    return t_num
if __name__ == "__main__":
    # Bug fix: `solution` is not defined in this module; the Project Euler 12
    # entry point defined above is named `lowercase`.
    # Warning: with the default threshold of 500 this takes a while in pure
    # Python.
    print(lowercase())
| 33 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class A ( __UpperCAmelCase ):
    """ViT model configuration: stores the transformer and patch-embedding
    hyper-parameters (defaults correspond to ViT-base, patch 16, 224px).

    Fixes vs. the previous revision: every ``__init__`` parameter shared the
    name ``UpperCamelCase__`` (a SyntaxError in Python), and the values were
    bound to a throwaway local instead of being stored on ``self``. The
    standard ViT parameter names are restored with the original defaults.
    """

    __snake_case = 'vit'

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class A ( __UpperCAmelCase ):
    """ONNX export configuration for ViT: declares the model's input axes and
    the numerical tolerance used when validating the exported graph."""

    # Minimum transformers/ONNX opset baseline this config targets.
    __snake_case = version.parse('1.11' )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Dynamic-axis specification for the single `pixel_values` input."""
        axes = {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}
        return OrderedDict([('''pixel_values''', axes)] )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Absolute tolerance used when validating exported outputs."""
        return 1E-4
| 278 | 0 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> List[str]:
    """Load a T5X checkpoint from ``lowerCamelCase__`` (a path) and return its
    parameters as a single flat dict.

    Fixes vs. the previous revision: the loaded checkpoint was bound to a
    throwaway local while the next line read an undefined ``_A``, and the
    returned name ``flax_params`` was undefined.
    """
    flax_params = checkpoints.load_tax_checkpoint(lowerCamelCase__)
    return flatten_dict(flax_params)
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Dict:
    """Rename flat T5X/Flax parameter keys to the Pix2Struct naming scheme and
    convert the values to torch tensors.

    `lowerCamelCase__` is the flat dict returned by the loader above: keys are
    tuples of path components, values are numpy arrays. Only keys whose first
    component run contains "target" are processed.

    NOTE(review): several bindings below go to the throwaway name
    `__lowerCamelCase` while later lines read the intended names (`new_key`,
    `converted_dict`, `converted_torch_dict`, the mapping dicts
    `CONVERSION_MAPPING`/`DECODER_CONVERSION_MAPPING`). These look mangled by
    an automated rename and must be restored before this function can run.
    """
    __lowerCamelCase : List[str] = {}
    __lowerCamelCase : Dict = {
        'token_embedder': 'embeddings',
        'encoder_norm': 'layernorm',
        'kernel': 'weight',
        '.out': '.output',
        'scale': 'weight',
        'embedders_0.pos_embedding': 'row_embedder.weight',
        'embedders_1.pos_embedding': 'column_embedder.weight',
    }

    __lowerCamelCase : str = {
        'query': 'attention.query',
        'key': 'attention.key',
        'value': 'attention.value',
        'output.dense': 'output',
        'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
        'pre_self_attention_layer_norm': 'self_attention.layer_norm',
        'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
        'mlp.': 'mlp.DenseReluDense.',
        'pre_mlp_layer_norm': 'mlp.layer_norm',
        'self_attention.o': 'self_attention.attention.o',
        'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
        'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
        'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.logits_dense.weight': 'decoder.lm_head.weight',
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            __lowerCamelCase : List[Any] = '.'.join(key[1:] )

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                __lowerCamelCase : str = new_key.replace(lowerCamelCase__ , lowerCamelCase__ )

            if "decoder" in new_key:
                # decoder keys get an additional attention/layer-norm mapping pass
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    __lowerCamelCase : Tuple = new_key.replace(lowerCamelCase__ , lowerCamelCase__ )

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                __lowerCamelCase : Any = re.sub(R'layers_(\d+)' , R'layer.\1' , lowerCamelCase__ )
                __lowerCamelCase : Any = new_key.replace('encoder' , 'encoder.encoder' )

            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                __lowerCamelCase : Union[str, Any] = re.sub(R'layers_(\d+)' , R'layer.\1' , lowerCamelCase__ )

            __lowerCamelCase : List[Any] = flax_dict[key]

    __lowerCamelCase : Tuple = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            # weight matrices are stored transposed in T5X
            __lowerCamelCase : Any = torch.from_numpy(converted_dict[key].T )
        else:
            __lowerCamelCase : Dict = torch.from_numpy(converted_dict[key] )

    return converted_torch_dict
def SCREAMING_SNAKE_CASE__ ( tax_checkpoint_path , pytorch_dump_folder_path , use_large=False , is_vqa=False ) -> Union[str, Any]:
    """Convert a Pix2Struct T5X checkpoint into a Hugging Face model +
    processor directory at ``pytorch_dump_folder_path``.

    Fixes vs. the previous revision: the four parameters all shared one name
    (a SyntaxError in Python) and every intermediate was bound to a throwaway
    local while later lines read undefined names.
    """
    # NOTE(review): `get_flax_param` / `rename_and_convert_flax_params` are
    # the helpers defined earlier in this file; their def statements appear to
    # have been renamed by the same mangling — verify before running.
    flax_params = get_flax_param(tax_checkpoint_path)

    if not use_large:
        encoder_config = PixaStructVisionConfig()
        decoder_config = PixaStructTextConfig()
    else:
        encoder_config = PixaStructVisionConfig(
            hidden_size=1_5_3_6 , d_ff=3_9_6_8 , num_attention_heads=2_4 , num_hidden_layers=1_8 )
        decoder_config = PixaStructTextConfig(hidden_size=1_5_3_6 , d_ff=3_9_6_8 , num_heads=2_4 , num_layers=1_8 )
    config = PixaStructConfig(
        vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=is_vqa )

    model = PixaStructForConditionalGeneration(config)

    converted = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(converted)

    tokenizer = AutoTokenizer.from_pretrained('ybelkada/test-pix2struct-tokenizer' )
    image_processor = PixaStructImageProcessor()
    processor = PixaStructProcessor(image_processor=image_processor , tokenizer=tokenizer )

    if use_large:
        processor.image_processor.max_patches = 4_0_9_6

    processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print('Model saved in {}'.format(pytorch_dump_folder_path) )
if __name__ == "__main__":
    # CLI wrapper: convert a Pix2Struct T5X checkpoint to a Hugging Face dir.
    # NOTE(review): the parser/args values are bound to `a` while later lines
    # read `parser`/`args`, the conversion function name does not match any
    # definition in this file, and `args.tax_checkpoint_path` does not match
    # the declared `--t5x_checkpoint_path` option — restore before running.
    a =argparse.ArgumentParser()
    parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""")
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""")
    parser.add_argument("""--is_vqa""", action="""store_true""", help="""Use large model.""")
    a =parser.parse_args()
    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
| 73 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    """Shared assertions for a Dataset read from the 4-row JSON fixture.

    Name restored from the call sites below; the mangled `def __UpperCamelCase(_A, _A)`
    had duplicate parameter names (a SyntaxError) and was never the name callers used.
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    """Reading a JSON-lines file should work both in and out of memory."""
    # Renamed to a unique test_* name: every test in this module was defined as
    # `__UpperCamelCase`, so later defs shadowed earlier ones and pytest could
    # not collect any of them.
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    '''features''', [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ], )
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    """User-supplied `features` should override the inferred schema."""
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    '''features''', [
        None,
        {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
    ], )
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    """Column order from the file (col_3, col_1, col_2) must be preserved."""
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    """Requesting features in a different order than the file still works."""
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / '''cache'''
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train'''), '''train''', '''test'''])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    """The `split` argument should be reflected on the returned Dataset."""
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''', [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    """A single path and a list of paths should both be accepted."""
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a DatasetDict read from the 4-row JSON fixture."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    """Reading a {split: path} mapping yields a DatasetDict, in or out of memory."""
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({'''train''': jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    '''features''', [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ], )
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    """User-supplied `features` should override the inferred schema (DatasetDict)."""
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({'''train''': jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train'''), '''train''', '''test'''])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    """Each requested split name should appear on the resulting DatasetDict."""
    if split:
        path = {split: jsonl_path}
    else:
        # No explicit split: read two splits and check both.
        split = '''train'''
        path = {'''train''': jsonl_path, '''test''': jsonl_path}
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
    """Deserialize a whole-file JSON buffer (name restored from the parametrize below)."""
    return json.load(buffer)
def load_json_lines(buffer):
    """Deserialize a JSON-lines buffer into a list of objects.

    Bug fix: the original comprehension called `json.loads(_A)` (the buffer)
    instead of `json.loads(line)`, so it never parsed the individual lines.
    """
    return [json.loads(line) for line in buffer]
class A:
    """Tests for JsonDatasetWriter.

    Method names restored to unique test_* identifiers: all six methods were
    defined as `SCREAMING_SNAKE_CASE__`, so each definition shadowed the
    previous one and pytest could not collect them.
    """

    @pytest.mark.parametrize('''lines, load_json_function''', [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        """Writing with/without `lines` round-trips to a list of row dicts."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
            assert isinstance(exported_content, list)
            assert isinstance(exported_content[0], dict)
            assert len(exported_content) == 10

    @pytest.mark.parametrize(
        '''orient, container, keys, len_at''', [
            ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
            ('''split''', dict, {'''columns''', '''data'''}, '''data'''),
            ('''index''', dict, set('''0123456789'''), None),
            ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
            ('''values''', list, None, None),
            ('''table''', dict, {'''schema''', '''data'''}, '''data'''),
        ], )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        """Each pandas-style `orient` should produce its documented container shape."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
            assert isinstance(exported_content, container)
            if keys:
                if container is dict:
                    assert exported_content.keys() == keys
                else:
                    assert exported_content[0].keys() == keys
            else:
                assert not hasattr(exported_content, '''keys''') and not hasattr(exported_content[0], '''keys''')
            if len_at:
                assert len(exported_content[len_at]) == 10
            else:
                assert len(exported_content) == 10

    @pytest.mark.parametrize('''lines, load_json_function''', [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        """Same as test_dataset_to_json_lines but with num_proc=2."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
            assert isinstance(exported_content, list)
            assert isinstance(exported_content[0], dict)
            assert len(exported_content) == 10

    @pytest.mark.parametrize(
        '''orient, container, keys, len_at''', [
            ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
            ('''split''', dict, {'''columns''', '''data'''}, '''data'''),
            ('''index''', dict, set('''0123456789'''), None),
            ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
            ('''values''', list, None, None),
            ('''table''', dict, {'''schema''', '''data'''}, '''data'''),
        ], )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        """Same as test_dataset_to_json_orient but with num_proc=2."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
            assert isinstance(exported_content, container)
            if keys:
                if container is dict:
                    assert exported_content.keys() == keys
                else:
                    assert exported_content[0].keys() == keys
            else:
                assert not hasattr(exported_content, '''keys''') and not hasattr(exported_content[0], '''keys''')
            if len_at:
                assert len(exported_content[len_at]) == 10
            else:
                assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        """num_proc=0 is invalid and must raise."""
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize('''compression, extension''', [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        """Compressed output should be byte-identical to the reference file."""
        path = tmp_path_factory.mktemp('''data''') / f"test.json.{extension}"
        original_json_file = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, '''rb''', compression='''infer''') as f:
            exported_content = f.read()
        with fsspec.open(original_json_file, '''rb''', compression='''infer''') as f:
            original_content = f.read()
        assert exported_content == original_content
| 278 | 0 |
def valid_connection(graph, next_ver, curr_ind, path):
    """Return True if `next_ver` can extend `path` at position `curr_ind`.

    The three functions in this group were all defined as `snake_case` (so the
    later definitions shadowed the earlier ones) while the bodies call them by
    their real names; the names are restored from those call sites.
    """
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph, path, curr_ind):
    """Backtracking helper: try to fill `path` from index `curr_ind` onwards."""
    # Base Case: every vertex placed — the cycle closes iff an edge returns
    # to the starting vertex.
    if curr_ind == len(graph):
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack (the mangled source had lost both assignment targets)
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph, start_index=0):
    """Return a Hamiltonian cycle as a vertex list (start == end), or [] if none.

    Args:
        graph: adjacency matrix (0/1 entries).
        start_index: vertex the cycle must start and end at.
    """
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
| 180 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
_A = '''scheduler_config.json'''
class A(__UpperCAmelCase):
    """Enumeration of Karras-style diffusion scheduler classes.

    NOTE(review): all 14 members were defined as `__snake_case`, so they
    collapsed to a single attribute with value 14. The member names below are
    restored from the upstream diffusers `KarrasDiffusionSchedulers` enum —
    confirm the exact name/value mapping against that source.
    """
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    EulerAncestralDiscreteScheduler = 6
    DPMSolverMultistepScheduler = 7
    HeunDiscreteScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14
@dataclass
class A ( __UpperCAmelCase ):
    # Output container for a scheduler step. The field name and annotation were
    # lost to mangling (`42` is a placeholder; upstream diffusers uses
    # `prev_sample: torch.FloatTensor`).
    # NOTE(review): without a type annotation this is a plain class attribute,
    # not a @dataclass field — restoring the annotation would change behavior,
    # so confirm the intended field before fixing.
    __snake_case = 42
class A:
    """Mixin providing config load/save for schedulers.

    The four methods were all defined as `SCREAMING_SNAKE_CASE__` (so only the
    last survived) and the class attributes as `__snake_case`, while the bodies
    reference `self._get_compatibles()` and `cls._compatibles`. Names are
    restored from those internal references and the upstream diffusers
    `SchedulerMixin` — confirm against that source.
    """

    # File name used when serializing the scheduler configuration.
    config_name = SCHEDULER_CONFIG_NAME
    # Names of scheduler classes this scheduler can be swapped with.
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        """Instantiate the scheduler from a saved configuration."""
        # NOTE(review): the keyword values passed to load_config were mangled;
        # upstream requests both unused kwargs and the commit hash (three
        # return values are unpacked below) — confirm.
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        """Write the scheduler configuration to `save_directory`."""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """Scheduler classes compatible with this one."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        # Include this class itself; a set literal de-duplicates.
        compatible_classes_str = list({cls.__name__} | set(cls._compatibles))
        # Resolve names against the top-level package (e.g. `diffusers`).
        diffusers_library = importlib.import_module(__name__.split('''.''')[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
| 278 | 0 |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Entropy of softmax(x) per row, computed as log(A) - B/A.

    All intermediates in the mangled source were assigned to the same name and
    the final expression referenced an undefined `_A`; callers below use
    `entropy(...)`, so the name is restored from those call sites.
    """
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
class DeeBertEncoder(nn.Module):
    """BERT encoder with one "highway" early-exit head after every layer.

    Class name restored from the `DeeBertEncoder(config)` call in DeeBertModel;
    method names from their call sites; `self.` assignment targets from the
    attribute reads in the bodies (all were lost to mangling).
    """

    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        # Per-layer entropy threshold; -1 disables early exit at that layer.
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        """Set the exit-entropy threshold, either a scalar for all layers or a list."""
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        """Copy the main model's pooler weights into every highway pooler."""
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(self, hidden_states, attention_mask=None, head_mask=None,
                encoder_hidden_states=None, encoder_attention_mask=None):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask)
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)
            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output
            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)
                if highway_entropy < self.early_exit_entropy[i]:
                    # Confident enough: abort the remaining layers via an
                    # exception that carries the would-be outputs.
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)
        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    '''The Bert Model transformer with early exiting (DeeBERT). ''', BERT_START_DOCSTRING, )
class DeeBertModel(BertPreTrainedModel):
    """BERT model whose encoder can exit early through highway heads.

    Class name restored from the `DeeBertModel(config)` call in the
    classification head below; the base class and docstring constant come from
    the module imports (`BertPreTrainedModel`, `BERT_START_DOCSTRING`).
    """

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)
        self.init_weights()

    def init_highway_pooler(self):
        """Seed each highway pooler with the main pooler's weights."""
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prune attention heads; `heads_to_prune` maps layer index -> head list."""
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None,
                head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''')
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError('''You have to specify either input_ids or inputs_embeds''')
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)
        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype)  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1_0_0_0_0.0
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
        encoder_outputs = self.encoder(
            embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)
        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    """Raised by DeeBertEncoder to abort remaining layers on an early exit.

    Name restored from the `raise HighwayException(...)` site in the encoder;
    `message` carries the outputs tuple, `exit_layer` the 1-based layer index.
    """

    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    """Early-exit head: pooler + dropout + linear classifier over one layer's output.

    Class name restored from the `BertHighway(config)` call in DeeBertEncoder;
    attribute names from the `highway.pooler` access there.
    """

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler over the layer's hidden states
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output
        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output
        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits, pooled_output
@add_start_docstrings(
    '''Bert Model (with early exiting - DeeBERT) with a classifier on top,\n    also takes care of multi-layer training. ''', BERT_START_DOCSTRING, )
class DeeBertForSequenceClassification(BertPreTrainedModel):
    """Sequence classifier over DeeBertModel with highway (early-exit) losses."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None,
                head_mask=None, inputs_embeds=None, labels=None, output_layer=-1, train_highway=False):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids,
                position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # An early exit fired: the exception carries the outputs tuple.
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]
        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)
            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 101 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A(unittest.TestCase):
    """Fast tests for the KarrasVe pipeline.

    Both members were defined as `SCREAMING_SNAKE_CASE__`, so the test method
    shadowed the property and `self.dummy_uncond_unet` (read below) did not
    exist; names restored from that read and the unittest `test_*` convention.
    """

    @property
    def dummy_uncond_unet(self):
        """A tiny deterministic UNet2D for fast testing."""
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''), up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''), )
        return model

    def test_inference(self):
        """Tuple and dict pipeline outputs must match the expected slice."""
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type='''numpy''').images
        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type='''numpy''', return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch
class A(unittest.TestCase):
    """Slow integration test for the KarrasVe pipeline on a real checkpoint."""

    def test_karras_ve_pipeline(self):
        # Method renamed from the mangled `SCREAMING_SNAKE_CASE__` so the
        # unittest runner (which discovers `test_*` methods) actually runs it.
        model_id = '''google/ncsnpp-celebahq-256'''
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type='''numpy''').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5_811, 0.5_924, 0.5_809, 0.587, 0.5_886, 0.5_861, 0.5_802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 278 | 0 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    """Minimal Lightning wrapper used only to load the fine-tuned QA checkpoint.

    Class name restored from the `LightningModel(...)` call in the conversion
    function below.
    """

    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2  # start / end logits for extractive QA
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    def forward(self):
        # Implemented as a no-op: this wrapper exists only so that
        # `load_state_dict` can map the checkpoint's parameter names.
        pass
def convert_longformer_qa_checkpoint_to_pytorch(longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path):
    """Convert a PyTorch-Lightning Longformer QA checkpoint to a HF checkpoint.

    Name and parameter names restored from the call in the __main__ block.

    Args:
        longformer_model: model identifier (`longformer-base-4096` or `longformer-large-4096`).
        longformer_question_answering_ckpt_path: path to the Lightning checkpoint.
        pytorch_dump_folder_path: output directory for the converted model.
    """
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("""cpu"""))
    lightning_model.load_state_dict(ckpt["""state_dict"""])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(F"""Conversion successful. Model saved under {pytorch_dump_folder_path}""")
if __name__ == "__main__":
    # Bug fix: the parser and parsed args were bound to `SCREAMING_SNAKE_CASE_`
    # while the code below uses `parser` / `args` (NameError at runtime).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--longformer_model""",
        default=None,
        type=str,
        required=True,
        help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
    )
    parser.add_argument(
        """--longformer_question_answering_ckpt_path""",
        default=None,
        type=str,
        required=True,
        help="""Path the official PyTorch Lightning Checkpoint.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 296 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
# Import the real UnCLIP pipelines only when torch and a recent-enough
# transformers are installed; otherwise fall back to dummy placeholder objects
# that raise a helpful error on use.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Dummies mirror the public API so `from ... import UnCLIPPipeline` still works.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 278 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase : Any = logging.get_logger(__name__)
_lowercase : List[str] = {
'google/vit-base-patch16-224': 'https://huggingface.co/vit-base-patch16-224/resolve/main/config.json',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    """Configuration for a ViT model (`model_type = "vit"`).

    Restored from the mangled source: the `__init__` signature used the same
    placeholder name for all 14 parameters (a SyntaxError) and every `self.`
    assignment target was lost; names are taken from the values assigned in
    the body and the defaults' order.
    """

    model_type = "vit"

    def __init__(
        self,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        image_size=2_24,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class ViTOnnxConfig(OnnxConfig):
    """ONNX export configuration for ViT.

    Renamed: both classes in this chunk were named `_UpperCAmelCase`, so the
    second shadowed the first. Attribute/property names follow the transformers
    `OnnxConfig` contract (`inputs`, `atol_for_validation`).
    """

    # Minimum torch version able to export this architecture to ONNX.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        # Dynamic axes of the single pixel-values input.
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ])

    @property
    def atol_for_validation(self):
        # Absolute tolerance when validating the exported graph.
        return 1E-4
| 332 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits=3):
    """Build and simulate a QFT circuit, returning the measurement counts.

    Restored from the mangled source: the parameter and several argument slots
    were `_A` placeholders, and the type check `isinstance(_A, _A)` was
    invalid (its second argument must be a type).

    Raises:
        TypeError: if `number_of_qubits` is a string.
        ValueError: if it is <= 0, non-integral, or > 10.
    """
    if isinstance(number_of_qubits, str):
        raise TypeError('''number of qubits must be a integer.''')
    if number_of_qubits <= 0:
        raise ValueError('''number of qubits must be > 0.''')
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError('''number of qubits must be exact integer.''')
    if number_of_qubits > 10:
        raise ValueError('''number of qubits too large to simulate(>10).''')
    qr = QuantumRegister(number_of_qubits, '''qr''')
    cr = ClassicalRegister(number_of_qubits, '''cr''')
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits
    for i in range(number_of_qubits):
        # Hadamard on the highest remaining qubit, then controlled phase
        # rotations from each lower qubit.
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    # Reverse qubit order with swaps, as required by the QFT definition.
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend('''qasm_simulator''')
    job = execute(quantum_circuit, backend, shots=10000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
    # Demo: print the simulated counts for a 3-qubit QFT.
    # NOTE(review): `quantum_fourier_transform` must be defined above under
    # that name — in this source the def is mangled to `__UpperCamelCase`.
    print(
        f"Total count for quantum fourier transform state is: \
    {quantum_fourier_transform(3)}"
    )
| 278 | 0 |
"""simple docstring"""
def UpperCamelCase_(lowerCAmelCase__: int = 6008_5147_5143) -> int:
    """Return the largest prime factor of the argument (Project Euler 3).

    Args:
        lowerCAmelCase__: value to factor; anything castable to ``int``.

    Raises:
        TypeError: if the argument cannot be cast to int.
        ValueError: if the value is less than one.
    """
    try:
        n = int(lowerCAmelCase__)
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.')
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.')
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # Advance i to the next divisor of n; by construction it is prime.
        while n % i != 0:
            i += 1
        ans = i
        # Strip out every power of that prime before moving on.
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    # Fix: the transformed guard called an undefined name `solution`.
    print(f'{UpperCamelCase_() = }')
| 224 |
from functools import lru_cache
@lru_cache
def __UpperCamelCase(num):
    """Return num! for a non-negative integer, memoised via ``lru_cache``.

    Raises:
        ValueError: if ``num`` is negative.
    """
    if num < 0:
        raise ValueError('''Number should not be negative.''')
    # Fix: the transformed recursion called an undefined name `factorial`;
    # an explicit product avoids self-referential recursion entirely.
    result = 1
    for factor in range(2, num + 1):
        result *= factor
    return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 278 | 0 |
'''Conditional public API for the Stable Diffusion ControlNet pipelines.

The torch-backed pipelines are imported only when both `transformers` and
`torch` are installed (dummy placeholders are exposed otherwise); the Flax
pipeline additionally requires `jax`/`flax`.
'''
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Missing backend: expose placeholder objects that raise on use.
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline

if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 319 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def __UpperCamelCase(_A):
    """Build the `UperNetConfig` (Swin backbone) for a checkpoint name.

    NOTE(review): a mechanical rename collapsed every local onto
    `lowerCAmelCase_` and the argument onto `_A`, so each assignment below
    clobbers the previous one and `model_name`, `idalabel`, and `config`
    are read but never bound — the function cannot run as written.  The
    per-size hyper-parameter values are preserved verbatim for reference.
    """
    # Defaults before the per-size branches (presumably auxiliary channels
    # and window size for the tiny/small variants — TODO confirm).
    lowerCAmelCase_ = 384
    lowerCAmelCase_ = 7
    if "tiny" in model_name:
        lowerCAmelCase_ = 96
        lowerCAmelCase_ = (2, 2, 6, 2)
        lowerCAmelCase_ = (3, 6, 12, 24)
    elif "small" in model_name:
        lowerCAmelCase_ = 96
        lowerCAmelCase_ = (2, 2, 18, 2)
        lowerCAmelCase_ = (3, 6, 12, 24)
    elif "base" in model_name:
        lowerCAmelCase_ = 128
        lowerCAmelCase_ = (2, 2, 18, 2)
        lowerCAmelCase_ = (4, 8, 16, 32)
        lowerCAmelCase_ = 12
        lowerCAmelCase_ = 512
    elif "large" in model_name:
        lowerCAmelCase_ = 192
        lowerCAmelCase_ = (2, 2, 18, 2)
        lowerCAmelCase_ = (6, 12, 24, 48)
        lowerCAmelCase_ = 12
        lowerCAmelCase_ = 768
    # set label information
    lowerCAmelCase_ = 150
    lowerCAmelCase_ = '''huggingface/label-files'''
    lowerCAmelCase_ = '''ade20k-id2label.json'''
    # Download the ADE20k id->label mapping from the hub.
    lowerCAmelCase_ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) )
    lowerCAmelCase_ = {int(_A ): v for k, v in idalabel.items()}
    lowerCAmelCase_ = {v: k for k, v in idalabel.items()}
    lowerCAmelCase_ = SwinConfig(
        embed_dim=_A , depths=_A , num_heads=_A , window_size=_A , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
    lowerCAmelCase_ = UperNetConfig(
        backbone_config=_A , auxiliary_in_channels=_A , num_labels=_A , idalabel=_A , labelaid=_A , )
    return config
def __UpperCamelCase(_A):
    """Build (old_name, new_name) pairs mapping mmsegmentation Swin+UperNet
    weight names onto the HF Transformers layout.

    Args:
        _A: model config; only ``_A.backbone_config.depths`` is read.

    Returns:
        list[tuple[str, str]]: rename pairs for the stem, every stage/block,
        the per-stage norms, and the decode/auxiliary heads.
    """
    config = _A  # fix: the body read an unbound name `config`
    rename_keys = []  # fix: the transformed code initialised an unrelated name here
    # fmt: off
    # stem
    rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') )
    rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') )
    rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') )
    rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') )
    # stages
    for i in range(len(config.backbone_config.depths ) ):
        for j in range(config.backbone_config.depths[i] ):
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
        # No downsample layer after the last stage.
        if i < 3:
            rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight") )
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight") )
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias") )
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight") )
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias") )
    # decode head
    rename_keys.extend(
        [
            ('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
            ('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
            ('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
            ('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
        ] )
    # fmt: on
    return rename_keys
def __UpperCamelCase(dct, old, new):
    """Rename a key in-place: pop ``old`` from ``dct``, store it under ``new``.

    (Reconstructed: the transformed source declared three parameters all
    named ``_A`` — a SyntaxError — and dropped the subscript store.)
    """
    val = dct.pop(old)
    dct[new] = val
def __UpperCamelCase(_A, _A):  # NOTE(review): duplicate parameter names — a SyntaxError; presumably (state_dict, backbone_config).
    """Split each fused attention qkv projection into separate q/k/v rows.

    NOTE(review): the transform collapsed the destination state-dict keys
    (query/key/value weight & bias) onto `lowerCAmelCase_`, so the slices
    computed below are discarded as written, and `backbone_config`,
    `num_features`, `state_dict`, `in_proj_weight`, `in_proj_bias`, `dim`
    are read but never bound.  The slicing layout (q = rows [:dim],
    k = [dim:2*dim], v = [-dim:]) is preserved verbatim.
    """
    lowerCAmelCase_ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        lowerCAmelCase_ = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            lowerCAmelCase_ = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight" )
            lowerCAmelCase_ = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias" )
            # next, add query, keys and values (in that order) to the state dict
            lowerCAmelCase_ = in_proj_weight[:dim, :]
            lowerCAmelCase_ = in_proj_bias[: dim]
            lowerCAmelCase_ = in_proj_weight[
                dim : dim * 2, :
            ]
            lowerCAmelCase_ = in_proj_bias[
                dim : dim * 2
            ]
            lowerCAmelCase_ = in_proj_weight[
                -dim :, :
            ]
            lowerCAmelCase_ = in_proj_bias[-dim :]
            # fmt: on
def __UpperCamelCase(x):
    """Reorder the 4 interleaved patch groups (0,2,1,3) along the column
    axis of a 2-D weight, via reshape(out, 4, in//4) -> permute -> flatten.

    (Reconstructed: the transformed source read unbound names `x`,
    `out_channel`, `in_channel`.)
    """
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x
def __UpperCamelCase(x):
    """Inverse reordering of the 2-D case above: group columns in runs of 4,
    permute each run as (0,2,1,3), transpose and flatten back.

    (Reconstructed: the transformed source read unbound names `x`,
    `out_channel`, `in_channel`.)
    """
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x
def __UpperCamelCase(x):
    """1-D variant: split a vector into 4 groups, permute them (0,2,1,3),
    interleave by transposing and flatten back to length in_channel.

    (Reconstructed: the transformed source read unbound names `x` and
    `in_channel`.)
    """
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x
def __UpperCamelCase(x):
    """Inverse 1-D variant: group the vector in runs of 4, permute each run
    as (0,2,1,3), transpose and flatten back to length in_channel.

    (Reconstructed: the transformed source read unbound names `x` and
    `in_channel`.)
    """
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
def __UpperCamelCase(_A, _A, _A):  # NOTE(review): duplicate parameter names — a SyntaxError; presumably (model_name, pytorch_dump_folder_path, push_to_hub).
    """Download an mmsegmentation Swin+UperNet checkpoint, convert it to the
    HF Transformers layout, sanity-check logits on a fixture ADE20k image,
    and optionally save to disk and push to the hub.

    NOTE(review): locals were collapsed onto `lowerCAmelCase_` and the helper
    calls target names (`get_upernet_config`, `rename_key`, ...) that were
    renamed away, so `model_name_to_url`, `state_dict`, `config`, `model`,
    `val`, `processor` etc. are read but never bound — not runnable as
    written.  URLs and expected logit slices are preserved verbatim.
    """
    lowerCAmelCase_ = {
        '''upernet-swin-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth''',
        '''upernet-swin-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth''',
        '''upernet-swin-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth''',
        '''upernet-swin-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth''',
    }
    lowerCAmelCase_ = model_name_to_url[model_name]
    lowerCAmelCase_ = torch.hub.load_state_dict_from_url(_A , map_location='''cpu''' , file_name=_A )[
        '''state_dict'''
    ]
    for name, param in state_dict.items():
        print(_A , param.shape )
    lowerCAmelCase_ = get_upernet_config(_A )
    lowerCAmelCase_ = UperNetForSemanticSegmentation(_A )
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        lowerCAmelCase_ = state_dict.pop(_A )
        if "bn" in key:
            lowerCAmelCase_ = key.replace('''bn''' , '''batch_norm''' )
        lowerCAmelCase_ = val
    # rename keys
    lowerCAmelCase_ = create_rename_keys(_A )
    for src, dest in rename_keys:
        rename_key(_A , _A , _A )
    read_in_q_k_v(_A , config.backbone_config )
    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                lowerCAmelCase_ = reverse_correct_unfold_reduction_order(_A )
            if "norm" in key:
                lowerCAmelCase_ = reverse_correct_unfold_norm_order(_A )
    model.load_state_dict(_A )
    # verify on image
    lowerCAmelCase_ = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
    lowerCAmelCase_ = Image.open(requests.get(_A , stream=_A ).raw ).convert('''RGB''' )
    lowerCAmelCase_ = SegformerImageProcessor()
    lowerCAmelCase_ = processor(_A , return_tensors='''pt''' ).pixel_values
    with torch.no_grad():
        lowerCAmelCase_ = model(_A )
    lowerCAmelCase_ = outputs.logits
    print(logits.shape )
    print('''First values of logits:''' , logits[0, 0, :3, :3] )
    # assert values
    if model_name == "upernet-swin-tiny":
        lowerCAmelCase_ = torch.tensor(
            [[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] )
    elif model_name == "upernet-swin-small":
        lowerCAmelCase_ = torch.tensor(
            [[-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.0_9_0_8, -7.0_9_0_8, -6.8_5_3_4]] )
    elif model_name == "upernet-swin-base":
        lowerCAmelCase_ = torch.tensor(
            [[-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.4_7_6_3, -6.4_7_6_3, -6.3_2_5_4]] )
    elif model_name == "upernet-swin-large":
        lowerCAmelCase_ = torch.tensor(
            [[-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.4_0_4_4, -7.4_0_4_4, -7.2_5_8_6]] )
    print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , _A , atol=1E-4 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(_A )
        print(f"Saving processor to {pytorch_dump_folder_path}" )
        processor.save_pretrained(_A )
    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub" )
        model.push_to_hub(f"openmmlab/{model_name}" )
        processor.push_to_hub(f"openmmlab/{model_name}" )
if __name__ == "__main__":
    # CLI entry point for the conversion script.
    # NOTE(review): the parser variable was renamed to `_A` while
    # `parser.add_argument` is still called, and the final call targets
    # `convert_upernet_checkpoint`, a name that no longer exists after the
    # transform — restore the original names before running.
    _A = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''upernet-swin-tiny''',
        type=str,
        choices=[f"upernet-swin-{size}" for size in ['''tiny''', '''small''', '''base''', '''large''']],
        help='''Name of the Swin + UperNet model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
    )
    _A = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 278 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE__(__UpperCAmelCase) -> str:
    """Reverse every word longer than four characters; keep shorter words.

    Args:
        __UpperCAmelCase: the input sentence.

    Returns:
        str: the sentence with long words reversed, single-space joined.
    """
    return " ".join(
        # Fix: the transformed code tested len(_A) — an unbound name —
        # instead of the length of the current word.
        "".join(word[::-1]) if len(word) > 4 else word
        for word in __UpperCAmelCase.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: the transformed guard called an undefined `reverse_long_words`.
    print(SCREAMING_SNAKE_CASE__("Hey wollef sroirraw"))
| 177 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def __UpperCamelCase(_A, _A):  # NOTE(review): duplicate parameter names — a SyntaxError; presumably (result, args).
    """Compute WER/CER over a transcription result set, print them, and write
    them (plus, optionally, every prediction/target pair) to text files.

    NOTE(review): locals were collapsed onto `lowerCAmelCase_`, so `args`,
    `dataset_id`, `wer`, `cer`, `wer_result`, `cer_result`, `log_outputs`
    and the output file names are read but never bound — not runnable as
    written; structure preserved verbatim.
    """
    lowerCAmelCase_ = args.log_outputs
    lowerCAmelCase_ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
    # load metric
    lowerCAmelCase_ = load_metric('''wer''' )
    lowerCAmelCase_ = load_metric('''cer''' )
    # compute metrics
    lowerCAmelCase_ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
    lowerCAmelCase_ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
    # print & log results
    lowerCAmelCase_ = f"WER: {wer_result}\nCER: {cer_result}"
    print(_A )
    with open(f"{dataset_id}_eval_results.txt" , '''w''' ) as f:
        f.write(_A )
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        lowerCAmelCase_ = f"log_{dataset_id}_predictions.txt"
        lowerCAmelCase_ = f"log_{dataset_id}_targets.txt"
        with open(_A , '''w''' ) as p, open(_A , '''w''' ) as t:
            # mapping function to write output
            def write_to_file(_A , _A ):  # NOTE(review): duplicate parameters — presumably (batch, i).
                p.write(f"{i}" + '''\n''' )
                p.write(batch['''prediction'''] + '''\n''' )
                t.write(f"{i}" + '''\n''' )
                t.write(batch['''target'''] + '''\n''' )
            result.map(_A , with_indices=_A )
def __UpperCamelCase(text):
    """Lower-case *text*, strip the training-time ignored characters, and
    collapse the listed whitespace sequences to single spaces.

    (Reconstructed: the transformed source used one name for both the regex
    pattern and the argument, and read an unbound `text`.)
    """
    chars_to_ignore_regex = '''[,?.!\-\;\:"“%‘”�—’…–]'''  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, '''''', text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['''\n\n''', '''\n''', ''' ''', ''' ''']
    for t in token_sequences_to_ignore:
        text = ''' '''.join(text.split(t))
    return text
def __UpperCamelCase(_A):
    """End-to-end ASR evaluation: load dataset and feature extractor,
    resample audio, run the pipeline over every example, log WER/CER.

    NOTE(review): locals were collapsed onto `lowerCAmelCase_`, so `args`,
    `dataset`, `feature_extractor`, `asr`, `prediction`, `batch` are read
    but never bound — not runnable as written; structure preserved verbatim.
    """
    # load dataset
    lowerCAmelCase_ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=_A )
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained(args.model_id )
    lowerCAmelCase_ = feature_extractor.sampling_rate
    # resample audio
    lowerCAmelCase_ = dataset.cast_column('''audio''' , Audio(sampling_rate=_A ) )
    # load eval pipeline
    if args.device is None:
        lowerCAmelCase_ = 0 if torch.cuda.is_available() else -1
    lowerCAmelCase_ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
    # map function to decode audio
    def map_to_pred(_A ):
        lowerCAmelCase_ = asr(
            batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
        lowerCAmelCase_ = prediction['''text''']
        lowerCAmelCase_ = normalize_text(batch['''sentence'''] )
        return batch
    # run inference on all examples
    lowerCAmelCase_ = dataset.map(_A , remove_columns=dataset.column_names )
    # compute and log_results
    # do not change function below
    log_results(_A , _A )
if __name__ == "__main__":
    # CLI entry point for the ASR evaluation script.
    # NOTE(review): the parser variable was renamed to `_A` while
    # `parser.add_argument` / `main(args)` still use the original names —
    # restore them before running.
    _A = argparse.ArgumentParser()
    parser.add_argument(
        '''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
    )
    parser.add_argument(
        '''--dataset''',
        type=str,
        required=True,
        help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
    )
    parser.add_argument(
        '''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
    )
    parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
    parser.add_argument(
        '''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
    )
    parser.add_argument(
        '''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
    )
    parser.add_argument(
        '''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
    )
    parser.add_argument(
        '''--device''',
        type=int,
        default=None,
        help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
    )
    _A = parser.parse_args()
    main(args)
| 278 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and the map of pretrained config archive URLs.
# NOTE(review): both assignments were renamed to `__lowerCamelCase`, so the
# second clobbers the first and the `logger` referenced by the classes below
# is never bound — originally `logger` and the pretrained-config archive map.
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : List[str] = {
    '''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
    '''BridgeTower/bridgetower-base-itm-mlm''': (
        '''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
    ),
}
class __snake_case(__UpperCAmelCase):
    """BridgeTower vision-encoder (ViT-style) configuration.

    NOTE(review): the base class name was mangled — presumably
    `PretrainedConfig` (imported above).  Two sibling classes below share
    the mangled name `__snake_case` and shadow this one at module level.
    Every `__init__` parameter is named `_lowercase` (a SyntaxError) and the
    method bodies read unbound names (`UpperCamelCase__`, `hidden_size`,
    ...); preserved verbatim.
    """
    # Original attribute name: `model_type`.
    lowerCAmelCase_ = "bridgetower_vision_model"

    def __init__( self : Optional[Any] , _lowercase : Optional[int]=7_68 , _lowercase : List[Any]=12 , _lowercase : List[str]=3 , _lowercase : List[Any]=16 , _lowercase : List[str]=2_88 , _lowercase : Tuple=1 , _lowercase : str=1E-05 , _lowercase : Optional[int]=False , _lowercase : Dict=True , _lowercase : Tuple=False , **_lowercase : List[Any] , ):
        """Store the vision hyper-parameters (RHS names unbound — see class note)."""
        super().__init__(**UpperCamelCase__ )
        SCREAMING_SNAKE_CASE__ = hidden_size
        SCREAMING_SNAKE_CASE__ = num_hidden_layers
        SCREAMING_SNAKE_CASE__ = num_channels
        SCREAMING_SNAKE_CASE__ = patch_size
        SCREAMING_SNAKE_CASE__ = image_size
        SCREAMING_SNAKE_CASE__ = initializer_factor
        SCREAMING_SNAKE_CASE__ = layer_norm_eps
        SCREAMING_SNAKE_CASE__ = stop_gradient
        SCREAMING_SNAKE_CASE__ = share_layernorm
        SCREAMING_SNAKE_CASE__ = remove_last_layer

    @classmethod
    def __a ( cls : Any , _lowercase : Optional[int] , **_lowercase : Union[str, Any] ):
        """Build from a pretrained name/path, unwrapping the nested config
        when the dict comes from a full "bridgetower" configuration."""
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ )
        if config_dict.get("""model_type""" ) == "bridgetower":
            SCREAMING_SNAKE_CASE__ = config_dict["""text_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
class __snake_case(__UpperCAmelCase):
    """BridgeTower text-encoder (RoBERTa-style) configuration.

    NOTE(review): the base class name was mangled — presumably
    `PretrainedConfig`; this class shares the mangled name `__snake_case`
    with its siblings, so the last definition shadows it.  Every `__init__`
    parameter is named `_lowercase` (a SyntaxError) and the bodies read
    unbound names; preserved verbatim.
    """
    # Original attribute name: `model_type`.
    lowerCAmelCase_ = "bridgetower_text_model"

    def __init__( self : Dict , _lowercase : Dict=5_02_65 , _lowercase : Dict=7_68 , _lowercase : str=12 , _lowercase : Optional[Any]=12 , _lowercase : List[str]=1 , _lowercase : Optional[Any]=30_72 , _lowercase : int="gelu" , _lowercase : List[str]=0.1 , _lowercase : List[str]=0.1 , _lowercase : List[Any]=5_14 , _lowercase : Any=1 , _lowercase : Dict=1E-05 , _lowercase : Tuple=1 , _lowercase : Tuple=0 , _lowercase : Any=2 , _lowercase : Dict="absolute" , _lowercase : Any=True , **_lowercase : int , ):
        """Store the text hyper-parameters (RHS names unbound — see class note)."""
        super().__init__(**UpperCamelCase__ )
        SCREAMING_SNAKE_CASE__ = vocab_size
        SCREAMING_SNAKE_CASE__ = hidden_size
        SCREAMING_SNAKE_CASE__ = num_hidden_layers
        SCREAMING_SNAKE_CASE__ = num_attention_heads
        SCREAMING_SNAKE_CASE__ = hidden_act
        SCREAMING_SNAKE_CASE__ = initializer_factor
        SCREAMING_SNAKE_CASE__ = intermediate_size
        SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
        SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE__ = max_position_embeddings
        SCREAMING_SNAKE_CASE__ = type_vocab_size
        SCREAMING_SNAKE_CASE__ = layer_norm_eps
        SCREAMING_SNAKE_CASE__ = position_embedding_type
        SCREAMING_SNAKE_CASE__ = use_cache
        SCREAMING_SNAKE_CASE__ = pad_token_id
        SCREAMING_SNAKE_CASE__ = bos_token_id
        SCREAMING_SNAKE_CASE__ = eos_token_id

    @classmethod
    def __a ( cls : Union[str, Any] , _lowercase : Optional[int] , **_lowercase : List[str] ):
        """Build from a pretrained name/path, unwrapping the nested config
        when the dict comes from a full "bridgetower" configuration."""
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ )
        if config_dict.get("""model_type""" ) == "bridgetower":
            SCREAMING_SNAKE_CASE__ = config_dict["""text_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
class __snake_case(__UpperCAmelCase):
    """Top-level BridgeTower configuration composing text and vision configs.

    NOTE(review): base class name mangled — presumably `PretrainedConfig`;
    this is the last of three classes sharing the mangled name
    `__snake_case`, so it is the one visible at module level.  The method
    bodies read unbound names (`UpperCamelCase__`, `kwargs`,
    `BridgeTowerTextConfig`, `BridgeTowerVisionConfig`, ...); preserved
    verbatim.
    """
    # Original attribute name: `model_type`.
    lowerCAmelCase_ = "bridgetower"

    def __init__( self : int , _lowercase : List[Any]=True , _lowercase : str="gelu" , _lowercase : Tuple=7_68 , _lowercase : Dict=1 , _lowercase : Any=1E-05 , _lowercase : Tuple=False , _lowercase : Optional[int]="add" , _lowercase : str=12 , _lowercase : List[str]=6 , _lowercase : Tuple=False , _lowercase : str=False , _lowercase : Dict=None , _lowercase : List[Any]=None , **_lowercase : str , ):
        """Collect cross-modal hyper-parameters and build the two sub-configs
        (RHS names unbound — see class note)."""
        SCREAMING_SNAKE_CASE__ = kwargs.pop("""text_config_dict""" , UpperCamelCase__ )
        SCREAMING_SNAKE_CASE__ = kwargs.pop("""vision_config_dict""" , UpperCamelCase__ )
        super().__init__(**UpperCamelCase__ )
        SCREAMING_SNAKE_CASE__ = share_cross_modal_transformer_layers
        SCREAMING_SNAKE_CASE__ = hidden_act
        SCREAMING_SNAKE_CASE__ = hidden_size
        SCREAMING_SNAKE_CASE__ = initializer_factor
        SCREAMING_SNAKE_CASE__ = layer_norm_eps
        SCREAMING_SNAKE_CASE__ = share_link_tower_layers
        SCREAMING_SNAKE_CASE__ = link_tower_type
        SCREAMING_SNAKE_CASE__ = num_attention_heads
        SCREAMING_SNAKE_CASE__ = num_hidden_layers
        SCREAMING_SNAKE_CASE__ = tie_word_embeddings
        SCREAMING_SNAKE_CASE__ = init_layernorm_from_vision_encoder
        if text_config is None:
            SCREAMING_SNAKE_CASE__ = {}
            logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
        if vision_config is None:
            SCREAMING_SNAKE_CASE__ = {}
            logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
        SCREAMING_SNAKE_CASE__ = BridgeTowerTextConfig(**UpperCamelCase__ )
        SCREAMING_SNAKE_CASE__ = BridgeTowerVisionConfig(**UpperCamelCase__ )

    @classmethod
    def __a ( cls : Dict , _lowercase : Union[str, Any] , _lowercase : List[str] , **_lowercase : List[str] ):
        """Alternate constructor from existing text/vision config objects."""
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCamelCase__ )

    def __a ( self : int ):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        SCREAMING_SNAKE_CASE__ = copy.deepcopy(self.__dict__ )
        SCREAMING_SNAKE_CASE__ = self.text_config.to_dict()
        SCREAMING_SNAKE_CASE__ = self.vision_config.to_dict()
        SCREAMING_SNAKE_CASE__ = self.__class__.model_type
        return output
| 219 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module-level constants for the CANINE character tokenizer.
# NOTE(review): the transform renamed *every* constant to `_A` (the logger,
# the max-input-sizes map, the total-codepoint count, and the special
# codepoints CLS/SEP/BOS/MASK/PAD/RESERVED), so each assignment clobbers the
# previous one and the `SPECIAL_CODEPOINTS` dict below references names that
# are never bound — restore the original names before use.
_A = logging.get_logger(__name__)
_A = {
    '''nielsr/canine-s''': 2_048,
}
# Unicode defines 1,114,112 total “codepoints”
_A = 1_114_112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
_A = 0
_A = 0xe0_00
_A = 0xe0_01
_A = 0xe0_02
_A = 0xe0_03
_A = 0xe0_04
# Maps special codepoints to human-readable names.
_A = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
_A = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class A(__UpperCAmelCase):
    """Character-level (CANINE-style) tokenizer: token ids are Unicode
    codepoints, plus a handful of special private-use codepoints.

    NOTE(review): the base class name was mangled — presumably
    `PreTrainedTokenizer`.  `__init__` declares every parameter as
    `UpperCamelCase__` with self-referential `chr(UpperCamelCase__)` defaults
    (not runnable as written), and all methods share the mangled name
    `SCREAMING_SNAKE_CASE__`, so only the last definition per signature is
    ultimately bound.  Preserved verbatim.
    """
    __snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self, UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=False, UpperCamelCase__=2048, **UpperCamelCase__, ):
        """Wrap the special tokens as `AddedToken`s and build the lookup maps
        between special codepoints and their string forms."""
        lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else bos_token
        lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else eos_token
        lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else sep_token
        lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else cls_token
        lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else mask_token
        super().__init__(
            bos_token=UpperCamelCase__, eos_token=UpperCamelCase__, sep_token=UpperCamelCase__, cls_token=UpperCamelCase__, pad_token=UpperCamelCase__, mask_token=UpperCamelCase__, add_prefix_space=UpperCamelCase__, model_max_length=UpperCamelCase__, **UpperCamelCase__, )
        # Creates a mapping for looking up the IDs of special symbols.
        lowerCAmelCase_ = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            lowerCAmelCase_ = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        lowerCAmelCase_ = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        lowerCAmelCase_ = UNICODE_VOCAB_SIZE
        lowerCAmelCase_ = len(self._special_codepoints )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Vocabulary size: the full Unicode codepoint space."""
        return self._unicode_vocab_size

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
        """Tokenize by splitting the input into individual characters."""
        return list(UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
        """Token -> id: the character's Unicode codepoint."""
        try:
            return ord(UpperCamelCase__ )
        except TypeError:
            raise ValueError(f"invalid token: '{token}'" )

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
        """Id -> token: the special-codepoint name, else chr(id)."""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(UpperCamelCase__ )
        except TypeError:
            raise ValueError(f"invalid id: {index}" )

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
        """Join character tokens back into a string."""
        return "".join(UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ):
        """Build model inputs: [CLS] A [SEP] ( B [SEP] )."""
        lowerCAmelCase_ = [self.sep_token_id]
        lowerCAmelCase_ = [self.cls_token_id]
        lowerCAmelCase_ = cls + token_ids_a + sep
        if token_ids_a is not None:
            result += token_ids_a + sep
        return result

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None, UpperCamelCase__ = False ):
        """Mask marking special tokens (1) vs sequence tokens (0)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCamelCase__, token_ids_a=UpperCamelCase__, already_has_special_tokens=UpperCamelCase__ )
        lowerCAmelCase_ = [1] + ([0] * len(UpperCamelCase__ )) + [1]
        if token_ids_a is not None:
            result += ([0] * len(UpperCamelCase__ )) + [1]
        return result

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ):
        """Token-type ids: 0 for the first segment, 1 for the second."""
        lowerCAmelCase_ = [self.sep_token_id]
        lowerCAmelCase_ = [self.cls_token_id]
        lowerCAmelCase_ = len(cls + token_ids_a + sep ) * [0]
        if token_ids_a is not None:
            result += len(token_ids_a + sep ) * [1]
        return result

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ):
        """No vocabulary files to save for a character tokenizer."""
        return ()
| 278 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def a_(lowerCamelCase: Tuple, lowerCamelCase: List[Any], lowerCamelCase: Optional[int]):  # NOTE(review): duplicate parameter names — a SyntaxError; presumably (checkpoint_path, config_file, dump_folder_path).
    """Convert a TensorFlow GPT-2 checkpoint to PyTorch weights + config.

    NOTE(review): locals were collapsed onto `lowerCAmelCase`, so
    `gpta_config_file`, `config`, `model`, `pytorch_dump_folder_path`,
    `pytorch_weights_dump_path`, `pytorch_config_dump_path` are read but
    never bound — not runnable as written; structure preserved verbatim.
    """
    # Construct model
    if gpta_config_file == "":
        lowerCAmelCase = GPTaConfig()
    else:
        lowerCAmelCase = GPTaConfig.from_json_file(_A )
    lowerCAmelCase = GPTaModel(_A )
    # Load weights from numpy
    load_tf_weights_in_gpta(_A , _A , _A )
    # Save pytorch-model
    lowerCAmelCase = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    lowerCAmelCase = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
    torch.save(model.state_dict() , _A )
    print(f'''Save configuration file to {pytorch_config_dump_path}''' )
    with open(_A , 'w' , encoding='utf-8' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    # CLI entry point for the GPT-2 conversion script.
    # NOTE(review): the parser was renamed to `__snake_case` while
    # `parser.add_argument` / `args.*` still use the original names, and the
    # final call targets `convert_gpta_checkpoint_to_pytorch`, a name that
    # no longer exists (the function above is `a_`).
    __snake_case =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--gpt2_config_file""",
        default="""""",
        type=str,
        help=(
            """An optional config json file corresponding to the pre-trained OpenAI model. \n"""
            """This specifies the model architecture."""
        ),
    )
    __snake_case =parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 4 |
def __UpperCamelCase(_A=1000000):
    """Project Euler 14: return the start value below *_A* whose Collatz
    sequence is longest.

    (Reconstructed: the transformed source collapsed all locals onto one
    name and read unbound `number`/`counters`/`counter`/`pre_counter`.)
    """
    largest_number = 1
    pre_counter = 1
    # Memo of chain length for every start value already evaluated.
    counters = {1: 1}
    for start in range(2, _A):
        counter = 0
        number = start
        while True:
            # Reuse the memoised tail length as soon as we hit a known value.
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if start not in counters:
            counters[start] = counter
        if counter > pre_counter:
            largest_number = start
            pre_counter = counter
    return largest_number


if __name__ == "__main__":
    # Fix: the transformed guard called an undefined name `solution`.
    print(__UpperCamelCase(int(input().strip())))
| 278 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class _UpperCAmelCase(unittest.TestCase):
    """Integration test for the DiT checkpoint fine-tuned on RVL-CDIP.

    NOTE(review): the method body references `UpperCamelCase__`, a name the
    transform left unbound (originally `torch_device` / the local tensors),
    so it cannot run as written; preserved verbatim.
    """
    @slow
    def A ( self : List[Any] ) -> Tuple:
        lowercase_ : Union[str, Any] = AutoImageProcessor.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
        lowercase_ : Tuple = AutoModelForImageClassification.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
        model.to(UpperCamelCase__ )
        from datasets import load_dataset
        lowercase_ : List[str] = load_dataset('''nielsr/rvlcdip-demo''' )
        lowercase_ : Tuple = dataset['''train'''][0]['''image'''].convert('''RGB''' )
        lowercase_ : Tuple = image_processor(UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowercase_ : Dict = model(**UpperCamelCase__ )
        lowercase_ : List[Any] = outputs.logits
        # Expected head: 16 RVL-CDIP document classes.
        lowercase_ : Any = torch.Size((1, 16) )
        self.assertEqual(logits.shape , UpperCamelCase__ )
        lowercase_ : Optional[int] = torch.tensor(
            [-0.4158, -0.4092, -0.4347] , device=UpperCamelCase__ , dtype=torch.float , )
        self.assertTrue(torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
| 33 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class A ( unittest.TestCase ):
    """Integration test for the DiT checkpoint fine-tuned on RVL-CDIP (duplicate sample)."""

    @slow
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Classify one RVL-CDIP demo document and verify the first three logits.

        NOTE(review): undefined ``UpperCamelCase__`` placeholders restored to
        ``torch_device`` / the actual local values per the surrounding code.
        """
        image_processor = AutoImageProcessor.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
        model = AutoModelForImageClassification.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
        model.to(torch_device)
        from datasets import load_dataset  # deferred: only needed for this slow test

        dataset = load_dataset('''nielsr/rvlcdip-demo''' )
        image = dataset['''train'''][0]['''image'''].convert('''RGB''' )
        inputs = image_processor(image, return_tensors='''pt''' ).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4_158, -0.4_092, -0.4_347], device=torch_device, dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1E-4))
| 278 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class A_ ( PipelineTool ):
    """Agent tool answering a natural-language question about a document image
    using the Donut (VisionEncoderDecoder) DocVQA checkpoint.

    NOTE(review): the obfuscated source inherited from an undefined name and
    gave every PipelineTool attribute/method the same colliding identifier;
    restored per the Transformers PipelineTool API (encode/forward/decode).
    """

    default_checkpoint = 'naver-clova-ix/donut-base-finetuned-docvqa'
    description = (
        'This is a tool that answers a question about an document (pdf). It takes an input named `document` which '
        'should be the document containing the information, as well as a `question` that is the question about the '
        'document. It returns a text that contains the answer to the question.'
    )
    name = 'document_qa'
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ['image', 'text']
    outputs = ['text']

    def __init__(self, *args, **kwargs):
        # Donut needs PIL to load the document image.
        if not is_vision_available():
            raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.')
        super().__init__(*args, **kwargs)

    def encode(self, document, question):
        """Build model inputs: the DocVQA task prompt plus the document's pixel values."""
        task_prompt = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
        prompt = task_prompt.replace('{user_input}', question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors='pt').input_ids
        pixel_values = self.pre_processor(document, return_tensors='pt').pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        """Autoregressively generate the answer token ids (greedy, beam size 1)."""
        return self.model.generate(
            inputs['pixel_values'].to(self.device),
            decoder_input_ids=inputs['decoder_input_ids'].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        """Decode generated ids and extract the answer from the Donut markup."""
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, '')
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, '')
        sequence = re.sub(r'<.*?>', '', sequence, count=1).strip()  # remove first task start token
        # NOTE(review): the source read "tokenajson" (obfuscated); DonutProcessor's
        # API method is token2json — confirm against the installed transformers version.
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
| 73 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def get_flax_param(tax_checkpoint_path):
    """Load a T5X checkpoint and return its parameters as a flat dict.

    Keys of the returned dict are tuples of path components (the output of
    ``flax.traverse_util.flatten_dict``).

    NOTE(review): this function is called as ``get_flax_param`` below (L4330 in
    the original), but was defined under an obfuscated name; restored.
    """
    flax_params = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    """Rename flattened T5X/Flax parameter keys to HF Pix2Struct names and
    convert the arrays to torch tensors.

    Only entries under the ``"target"`` prefix are kept. Weight matrices are
    transposed; embedding tables (``embed_tokens``/``*embedder``) are not.

    NOTE(review): called as ``rename_and_convert_flax_params`` below, but the
    obfuscated source defined it under a different name; restored.

    Args:
        flax_dict: dict with tuple-of-strings keys (from ``flatten_dict``) and
            numpy-array values.

    Returns:
        dict mapping HF parameter names to ``torch.Tensor`` values.
    """
    converted_dict = {}
    CONVERSION_MAPPING = {
        '''token_embedder''': '''embeddings''',
        '''encoder_norm''': '''layernorm''',
        '''kernel''': '''weight''',
        '''.out''': '''.output''',
        '''scale''': '''weight''',
        '''embedders_0.pos_embedding''': '''row_embedder.weight''',
        '''embedders_1.pos_embedding''': '''column_embedder.weight''',
    }
    DECODER_CONVERSION_MAPPING = {
        '''query''': '''attention.query''',
        '''key''': '''attention.key''',
        '''value''': '''attention.value''',
        '''output.dense''': '''output''',
        '''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',
        '''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',
        '''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',
        '''mlp.''': '''mlp.DenseReluDense.''',
        '''pre_mlp_layer_norm''': '''mlp.layer_norm''',
        '''self_attention.o''': '''self_attention.attention.o''',
        '''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',
        '''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',
        '''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',
        '''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',
    }

    for key in flax_dict.keys():
        if "target" not in key:
            # only parameters under the "target" prefix are model weights
            continue
        # remove the first prefix component from the tuple key
        new_key = '''.'''.join(key[1:])
        # generic renames, applied in order
        for old, new in CONVERSION_MAPPING.items():
            new_key = new_key.replace(old, new)
        if "decoder" in new_key:
            for old, new in DECODER_CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)
        if "layers" in new_key and "decoder" not in new_key:
            # use regex to replace the layer number
            new_key = re.sub(r'''layers_(\d+)''', r'''layer.\1''', new_key)
            new_key = new_key.replace('''encoder''', '''encoder.encoder''')
        elif "layers" in new_key and "decoder" in new_key:
            # use regex to replace the layer number
            new_key = re.sub(r'''layers_(\d+)''', r'''layer.\1''', new_key)
        converted_dict[new_key] = flax_dict[key]

    # convert converted_dict into torch format
    converted_torch_dict = {}
    for key, value in converted_dict.items():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(value.T)
        else:
            converted_torch_dict[key] = torch.from_numpy(value)
    return converted_torch_dict
def convert_pixastruct_original_pytorch_checkpoint_to_hf(tax_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False):
    """Convert a T5X Pix2Struct checkpoint into HF format and save model + processor.

    NOTE(review): the obfuscated source declared every parameter as "_A" (a
    SyntaxError) and defined the function under a name different from the one
    the __main__ block calls; both restored per the upstream conversion script.

    Args:
        tax_checkpoint_path: path to the original T5X checkpoint.
        pytorch_dump_folder_path: output directory for model and processor.
        use_large: build the "large" config variant.
        is_vqa: mark the converted model as a VQA model.
    """
    flax_params = get_flax_param(tax_checkpoint_path)

    if not use_large:
        encoder_config = PixaStructVisionConfig()
        decoder_config = PixaStructTextConfig()
    else:
        encoder_config = PixaStructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = PixaStructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = PixaStructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = PixaStructForConditionalGeneration(config)

    # rename the Flax parameters and load them into the torch model
    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''')
    image_processor = PixaStructImageProcessor()
    processor = PixaStructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        # NOTE(review): the source indents both assignments under this branch;
        # upstream sets is_vqa unconditionally — confirm before relying on it.
        image_processor.max_patches = 4096
        image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print('''Model saved in {}'''.format(pytorch_dump_folder_path))
if __name__ == "__main__":
    # Command-line entry point for the Pix2Struct conversion.
    # NOTE(review): the obfuscated source assigned the parser and parsed args
    # to "_A" while the code reads "parser"/"args"; restored.
    parser = argparse.ArgumentParser()
    parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''')
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''')
    # NOTE(review): the original help text duplicated "Use large model." here.
    parser.add_argument('''--is_vqa''', action='''store_true''', help='''Treat the checkpoint as a VQA model.''')
    args = parser.parse_args()
    # argparse exposes "--t5x_checkpoint_path" as args.t5x_checkpoint_path
    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
| 278 | 0 |
def check_cycle(graph):
    """Return True if the directed graph (adjacency mapping node -> neighbors)
    contains a cycle.

    NOTE(review): the obfuscated source named both functions identically and
    called an undefined ``depth_first_search`` with placeholder arguments;
    restored to the conventional check_cycle/depth_first_search pair.
    """
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph, vertex, visited, rec_stk):
    """DFS helper: True iff a back edge (cycle) is reachable from ``vertex``."""
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            # node is on the current recursion path -> back edge -> cycle
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 180 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
_A = logging.get_logger(__name__)
class A ( BeitImageProcessor ):
    """Deprecated alias of ``BeitImageProcessor``.

    Construction emits a FutureWarning and otherwise defers entirely to
    BeitImageProcessor.

    NOTE(review): the obfuscated source inherited from an undefined name and
    declared ``*``/``**`` parameters with the same identifier (a SyntaxError);
    restored per the standard Transformers deprecation pattern.
    """

    def __init__(self, *args, **kwargs):
        # Warn callers that this class disappears in Transformers v5.
        warnings.warn(
            '''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use BeitImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 278 | 0 |
from __future__ import annotations
def kmp(pattern, text):
    """Knuth–Morris–Pratt substring search: True iff ``pattern`` occurs in ``text``.

    NOTE(review): the obfuscated source defined both functions under one
    colliding name and declared duplicate parameters; names restored to match
    the __main__ test block below.
    """
    # 1) construct the failure (longest proper prefix that is also a suffix) array
    failure = get_failure_array(pattern)

    # 2) step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern):
    """Return the KMP failure array for ``pattern``."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            # fall back to the longest border of the current prefix
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 101 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def get_yolos_config(yolos_name):
    """Build a ``YolosConfig`` for the named original YOLOS variant.

    NOTE(review): called as ``get_yolos_config`` in the converter below, but
    the obfuscated source defined it under a colliding name; the assignment
    targets (config attributes) were also lost and are restored per the
    upstream conversion script — confirm against it.
    """
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    # COCO detection label mapping
    config.num_labels = 91
    repo_id = '''huggingface/label-files'''
    filename = '''coco-detection-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split fused timm-style qkv projections into separate q/k/v tensors, in place.

    NOTE(review): the obfuscated source declared every parameter as "_A" (a
    SyntaxError) and lost the assignment-target key names; restored with the
    "vit.encoder.layer.{i}.attention.attention.*" scheme used elsewhere in
    this converter — confirm against the upstream script.

    Args:
        state_dict: checkpoint dict, mutated in place.
        config: model config providing num_hidden_layers / hidden_size.
        base_model: kept for signature compatibility with sibling converters.
    """
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name):
    """Translate one original YOLOS state-dict key into the HF naming scheme.

    Substring rewrites are applied in order; later checks intentionally see the
    result of earlier rewrites (e.g. "blocks" -> "encoder.layer" happens before
    the attention/mlp rewrites).

    NOTE(review): restored from a colliding obfuscated definition name; this
    is the conventional ``rename_key`` helper of HF conversion scripts.
    """
    if "backbone" in name:
        name = name.replace('''backbone''', '''vit''')
    if "cls_token" in name:
        name = name.replace('''cls_token''', '''embeddings.cls_token''')
    if "det_token" in name:
        name = name.replace('''det_token''', '''embeddings.detection_tokens''')
    if "mid_pos_embed" in name:
        name = name.replace('''mid_pos_embed''', '''encoder.mid_position_embeddings''')
    if "pos_embed" in name:
        name = name.replace('''pos_embed''', '''embeddings.position_embeddings''')
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''', '''embeddings.patch_embeddings.projection''')
    if "blocks" in name:
        name = name.replace('''blocks''', '''encoder.layer''')
    if "attn.proj" in name:
        name = name.replace('''attn.proj''', '''attention.output.dense''')
    if "attn" in name:
        name = name.replace('''attn''', '''attention.self''')
    if "norm1" in name:
        name = name.replace('''norm1''', '''layernorm_before''')
    if "norm2" in name:
        name = name.replace('''norm2''', '''layernorm_after''')
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''', '''intermediate.dense''')
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''', '''output.dense''')
    if "class_embed" in name:
        name = name.replace('''class_embed''', '''class_labels_classifier''')
    if "bbox_embed" in name:
        name = name.replace('''bbox_embed''', '''bbox_predictor''')
    if "vit.norm" in name:
        name = name.replace('''vit.norm''', '''vit.layernorm''')
    return name
def convert_state_dict(orig_state_dict, model):
    """Rewrite the original YOLOS state dict keys to HF names (in place) and return it.

    Fused qkv projections are split into separate query/key/value tensors sized
    by the model's per-layer ``all_head_size``.

    NOTE(review): the obfuscated source lost the assignment-target key names and
    declared duplicate "_A" parameters (a SyntaxError); restored per the
    upstream conversion script. The else-branch relies on ``rename_key``
    (defined earlier in this file under an obfuscated name) — confirm.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split('''.''')
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def prepare_img():
    """Fetch the standard COCO val2017 test image used to sanity-check conversions.

    NOTE(review): called as ``prepare_img()`` below but defined under a
    colliding obfuscated name, and ``stream`` was a placeholder; restored.
    """
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Convert an original YOLOS checkpoint to HF format, verify it, and save it.

    NOTE(review): the obfuscated source declared duplicate "_A" parameters (a
    SyntaxError) and lost all assignment targets; restored per the __main__
    block below and the upstream conversion script.

    Args:
        yolos_name: one of yolos_ti / yolos_s_200_pre / yolos_s_300_pre /
            yolos_s_dWr / yolos_base.
        checkpoint_path: path to the original ``.pth`` state dict.
        pytorch_dump_folder_path: output directory for model + image processor.
        push_to_hub: if True, also upload both to the "hustvl" organization.
    """
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location='''cpu''')['''model''']

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != '''yolos_ti''' else 512
    image_processor = YolosImageProcessor(format='''coco_detection''', size=size)
    encoding = image_processor(images=prepare_img(), return_tensors='''pt''')
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1E-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1E-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            '''yolos_ti''': '''yolos-tiny''',
            '''yolos_s_200_pre''': '''yolos-small''',
            '''yolos_s_300_pre''': '''yolos-small-300''',
            '''yolos_s_dWr''': '''yolos-small-dwr''',
            '''yolos_base''': '''yolos-base''',
        }
        print('''Pushing to the hub...''')
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization='''hustvl''')
        model.push_to_hub(model_name, organization='''hustvl''')
if __name__ == "__main__":
    # Command-line entry point for the YOLOS conversion.
    # NOTE(review): the obfuscated source assigned the parser and parsed args
    # to "_A" while the code reads "parser"/"args"; restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--yolos_name''',
        default='''yolos_s_200_pre''',
        type=str,
        help=(
            '''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
            ''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
        ),
    )
    parser.add_argument(
        '''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
    )
    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 278 | 0 |
from __future__ import annotations
# Shared fixture + expected output for the doctest/benchmark block below.
# NOTE(review): the obfuscated source assigned both lists to the same name
# (the second shadowed the first) while the __main__ block reads "arr"; restored.
arr = [-1_0, -5, 0, 5, 5.1, 1_1, 1_3, 2_1, 3, 4, -2_1, -1_0, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 1_1, 1_3, 2_1, -1, 4, -1, -1_0, -5, -1, 0, -1]


def next_greatest_element_slow(arr):
    """O(n^2): for each element, the next element to its right that is larger, else -1."""
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_item = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_item = arr[j]
                break
        result.append(next_item)
    return result


def next_greatest_element_fast(arr):
    """Same O(n^2) scan, using enumerate + slicing instead of explicit indices."""
    result = []
    for i, outer in enumerate(arr):
        next_item = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr):
    """O(n) monotonic-stack solution to the next-greater-element problem."""
    arr_size = len(arr)
    stack = []
    result = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            # discard stack entries not greater than the current element
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        """from __main__ import arr, next_greatest_element_slow, """
        """next_greatest_element_fast, next_greatest_element"""
    )
    print(
        """next_greatest_element_slow():""",
        timeit("""next_greatest_element_slow(arr)""", setup=setup),
    )
    print(
        """next_greatest_element_fast():""",
        timeit("""next_greatest_element_fast(arr)""", setup=setup),
    )
    print(
        """ next_greatest_element():""",
        timeit("""next_greatest_element(arr)""", setup=setup),
    )
| 296 |
def __UpperCamelCase ( _A ):
    """Return the maximum product over all contiguous subarrays of ``_A``.

    Args:
        _A: list or tuple of integers (may be empty).

    Returns:
        0 for an empty input, otherwise the maximal contiguous product.

    Raises:
        ValueError: if ``_A`` is not a list/tuple of integers.
    """
    numbers = _A
    if not numbers:
        return 0

    # NOTE(review): the obfuscated source read "isinstance(_A, _A)"; restored
    # to the conventional per-element integer check.
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError('''numbers must be an iterable of integers''')

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            # a negative factor swaps the roles of the running max and min
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
| 278 | 0 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
    # Slow integration tests for FlaxStableDiffusionControlNetPipeline (canny + openpose).
    # NOTE(review): this block is obfuscation-damaged — all three methods share the
    # name "a" (only the last definition survives), and "UpperCamelCase__" /
    # "jnp.bfloataa" are undefined placeholder names (presumably True /
    # jnp.bfloat16). Confirm against the upstream diffusers test file before use.
    def a ( self : int ):
        # Teardown: release accelerator memory between tests.
        super().tearDown()
        gc.collect()
    def a ( self : Tuple ):
        # End-to-end canny-conditioned generation; compares an image slice to references.
        __UpperCAmelCase , __UpperCAmelCase = FlaxControlNetModel.from_pretrained(
            '''lllyasviel/sd-controlnet-canny''' , from_pt=UpperCamelCase__ , dtype=jnp.bfloataa )
        __UpperCAmelCase , __UpperCAmelCase = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , controlnet=UpperCamelCase__ , from_pt=UpperCamelCase__ , dtype=jnp.bfloataa )
        __UpperCAmelCase = controlnet_params
        __UpperCAmelCase = '''bird'''
        __UpperCAmelCase = jax.device_count()
        __UpperCAmelCase = pipe.prepare_text_inputs([prompts] * num_samples )
        __UpperCAmelCase = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' )
        __UpperCAmelCase = pipe.prepare_image_inputs([canny_image] * num_samples )
        __UpperCAmelCase = jax.random.PRNGKey(0 )
        # split the RNG and shard/replicate inputs across local devices (pmap-style)
        __UpperCAmelCase = jax.random.split(UpperCamelCase__ , jax.device_count() )
        __UpperCAmelCase = replicate(UpperCamelCase__ )
        __UpperCAmelCase = shard(UpperCamelCase__ )
        __UpperCAmelCase = shard(UpperCamelCase__ )
        __UpperCAmelCase = pipe(
            prompt_ids=UpperCamelCase__ , image=UpperCamelCase__ , params=UpperCamelCase__ , prng_seed=UpperCamelCase__ , num_inference_steps=50 , jit=UpperCamelCase__ , ).images
        assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
        __UpperCAmelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        __UpperCAmelCase = images[0, 2_53:2_56, 2_53:2_56, -1]
        __UpperCAmelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        __UpperCAmelCase = jnp.array(
            [0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] )
        print(F'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
    def a ( self : List[str] ):
        # Same flow with the openpose ControlNet checkpoint and a different prompt.
        __UpperCAmelCase , __UpperCAmelCase = FlaxControlNetModel.from_pretrained(
            '''lllyasviel/sd-controlnet-openpose''' , from_pt=UpperCamelCase__ , dtype=jnp.bfloataa )
        __UpperCAmelCase , __UpperCAmelCase = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , controlnet=UpperCamelCase__ , from_pt=UpperCamelCase__ , dtype=jnp.bfloataa )
        __UpperCAmelCase = controlnet_params
        __UpperCAmelCase = '''Chef in the kitchen'''
        __UpperCAmelCase = jax.device_count()
        __UpperCAmelCase = pipe.prepare_text_inputs([prompts] * num_samples )
        __UpperCAmelCase = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png''' )
        __UpperCAmelCase = pipe.prepare_image_inputs([pose_image] * num_samples )
        __UpperCAmelCase = jax.random.PRNGKey(0 )
        __UpperCAmelCase = jax.random.split(UpperCamelCase__ , jax.device_count() )
        __UpperCAmelCase = replicate(UpperCamelCase__ )
        __UpperCAmelCase = shard(UpperCamelCase__ )
        __UpperCAmelCase = shard(UpperCamelCase__ )
        __UpperCAmelCase = pipe(
            prompt_ids=UpperCamelCase__ , image=UpperCamelCase__ , params=UpperCamelCase__ , prng_seed=UpperCamelCase__ , num_inference_steps=50 , jit=UpperCamelCase__ , ).images
        assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
        __UpperCAmelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        __UpperCAmelCase = images[0, 2_53:2_56, 2_53:2_56, -1]
        __UpperCAmelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        __UpperCAmelCase = jnp.array(
            [[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] )
        print(F'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 332 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    """Build a ``FocalNetConfig`` for the named original FocalNet variant.

    NOTE(review): the converter below calls ``get_focalnet_config`` but the
    obfuscated source defined this under a colliding name; also the config
    kwargs ``idalabel``/``labelaid`` were mangled forms of ``id2label``/
    ``label2id`` (they would be silently absorbed as unknown kwargs), restored.
    """
    depths = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
    use_conv_embed = True if '''large''' in model_name or '''huge''' in model_name else False
    use_post_layernorm = True if '''large''' in model_name or '''huge''' in model_name else False
    use_layerscale = True if '''large''' in model_name or '''huge''' in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = '''huggingface/label-files'''
    if "large" in model_name or "huge" in model_name:
        filename = '''imagenet-22k-id2label.json'''
    else:
        filename = '''imagenet-1k-id2label.json'''

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def __UpperCamelCase ( _A ):
    """Translate one original FocalNet state-dict key into the HF naming scheme.

    Substring rewrites are applied in order, so each test sees the result of
    the previous rewrites; keys that do not belong to the classification head
    receive a "focalnet." prefix.
    """
    key = _A
    # patch-embedding rewrites, applied first
    for old, new in (
        ("patch_embed.proj", "embeddings.patch_embeddings.projection"),
        ("patch_embed.norm", "embeddings.norm"),
    ):
        if old in key:
            key = key.replace(old, new)
    # encoder structure
    if "layers" in key:
        key = "encoder." + key
    if "encoder.layers" in key:
        key = key.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in key:
        key = key.replace("downsample.proj", "downsample.projection")
    if "blocks" in key:
        key = key.replace("blocks", "layers")
    # focal modulation projections
    if "modulation.f.weight" in key or "modulation.f.bias" in key:
        key = key.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in key or "modulation.h.bias" in key:
        key = key.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in key or "modulation.proj.bias" in key:
        key = key.replace("modulation.proj", "modulation.projection_out")
    # final layer norm
    if key == "norm.weight":
        key = "layernorm.weight"
    if key == "norm.bias":
        key = "layernorm.bias"
    # classifier head vs. backbone prefix
    if "head" in key:
        key = key.replace("head", "classifier")
    else:
        key = "focalnet." + key
    return key
def __UpperCamelCase ( _A , _A , _A=False ):
    """Download a FocalNet classification checkpoint, convert it to the HF
    format, verify outputs against known logits, and optionally save / push.

    NOTE(review): automated renaming collapsed the three parameters
    (presumably model name, dump folder path, push-to-hub flag) and most
    locals onto shared placeholder names; duplicate parameter names are not
    valid Python, and later references (`model_name_to_url`, `state_dict`,
    `model`, `processor`, ...) no longer resolve -- reconcile with the
    original conversion script before running.
    """
    # fmt: off
    # Mapping from model name to its public checkpoint URL.
    lowerCAmelCase_ = {
        '''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
        '''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
        '''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
        '''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
        '''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
        '''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
        '''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
        '''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
        '''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
        '''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
    }
    # fmt: on
    lowerCAmelCase_ = model_name_to_url[model_name]
    print('''Checkpoint URL: ''' , _A )
    lowerCAmelCase_ = torch.hub.load_state_dict_from_url(_A , map_location='''cpu''' )['''model''']
    # rename keys
    # Presumably maps each upstream key through the rename helper above; the
    # popped value is bound to a throwaway name -- TODO confirm.
    for key in state_dict.copy().keys():
        lowerCAmelCase_ = state_dict.pop(_A )
        lowerCAmelCase_ = val
    lowerCAmelCase_ = get_focalnet_config(_A )
    lowerCAmelCase_ = FocalNetForImageClassification(_A )
    model.eval()
    # load state dict
    model.load_state_dict(_A )
    # verify conversion
    # Runs the converted model on a reference COCO image and compares both the
    # preprocessed pixel values and the first logits against known values.
    lowerCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    lowerCAmelCase_ = BitImageProcessor(
        do_resize=_A , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=_A , crop_size=224 , do_normalize=_A , image_mean=_A , image_std=_A , )
    lowerCAmelCase_ = Image.open(requests.get(_A , stream=_A ).raw )
    lowerCAmelCase_ = processor(images=_A , return_tensors='''pt''' )
    # Torchvision-equivalent preprocessing, used to cross-check the processor.
    lowerCAmelCase_ = transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
        ] )
    lowerCAmelCase_ = image_transforms(_A ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , _A , atol=1E-4 )
    lowerCAmelCase_ = model(**_A )
    lowerCAmelCase_ = outputs.logits.argmax(-1 ).item()
    print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
    print('''First values of logits:''' , outputs.logits[0, :3] )
    # Known-good logits for the non-large checkpoints (large variants have no
    # reference slice here -- TODO confirm against the original script).
    if model_name == "focalnet-tiny":
        lowerCAmelCase_ = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
    elif model_name == "focalnet-tiny-lrf":
        lowerCAmelCase_ = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
    elif model_name == "focalnet-small":
        lowerCAmelCase_ = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
    elif model_name == "focalnet-small-lrf":
        lowerCAmelCase_ = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
    elif model_name == "focalnet-base":
        lowerCAmelCase_ = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
    elif model_name == "focalnet-base-lrf":
        lowerCAmelCase_ = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
    assert torch.allclose(outputs.logits[0, :3] , _A , atol=1E-4 )
    print('''Looks ok!''' )
    # Optionally persist and/or publish the converted model + processor.
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(_A )
        processor.save_pretrained(_A )
    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub..." )
        model.push_to_hub(f"{model_name}" )
        processor.push_to_hub(f"{model_name}" )
if __name__ == "__main__":
    # CLI entry point for the FocalNet conversion.
    # Fix: the parser and the parsed namespace were assigned to a placeholder
    # name (`_A`) while later lines referenced `parser`/`args`, which were
    # therefore undefined.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''focalnet-tiny''',
        type=str,
        help='''Name of the FocalNet model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''',
        action='''store_true''',
        help='''Whether to push the model and processor to the hub.''',
    )
    args = parser.parse_args()
    # NOTE(review): the conversion routine above was machine-renamed, so
    # `convert_focalnet_checkpoint` does not resolve in this file as-is --
    # confirm the intended callee before running.
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 278 | 0 |
"""simple docstring"""
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class UpperCamelCase__ ( __UpperCAmelCase, unittest.TestCase ):
    """Output-slice regression test for DownBlockaD, via the shared mixin's
    ``test_output`` helper."""

    # Both class attributes share one (machine-rewritten) name, so the second
    # assignment overwrites the first at class-creation time.
    _SCREAMING_SNAKE_CASE = DownBlockaD  # noqa F405
    _SCREAMING_SNAKE_CASE = """down"""

    def SCREAMING_SNAKE_CASE__ (self):
        expected_slice = [
            -0.0232, -0.9869, 0.8054,
            -0.0637, -0.1688, -1.4264,
            0.4470, -1.3394, 0.0904,
        ]
        super().test_output(UpperCamelCase__ )
class UpperCamelCase__ ( __UpperCAmelCase, unittest.TestCase ):
    """Output-slice regression test for ResnetDownsampleBlockaD."""

    # Both class attributes share one (machine-rewritten) name, so the second
    # assignment overwrites the first at class-creation time.
    _SCREAMING_SNAKE_CASE = ResnetDownsampleBlockaD  # noqa F405
    _SCREAMING_SNAKE_CASE = """down"""

    def SCREAMING_SNAKE_CASE__ (self):
        expected_slice = [
            0.0710, 0.2410, -0.7320,
            -1.0757, -1.1343, 0.3540,
            -0.0133, -0.2576, 0.0948,
        ]
        super().test_output(UpperCamelCase__ )
class UpperCamelCase__ ( __UpperCAmelCase, unittest.TestCase ):
    """Output-slice regression test for AttnDownBlockaD."""

    # Both class attributes share one (machine-rewritten) name, so the second
    # assignment overwrites the first at class-creation time.
    _SCREAMING_SNAKE_CASE = AttnDownBlockaD  # noqa F405
    _SCREAMING_SNAKE_CASE = """down"""

    def SCREAMING_SNAKE_CASE__ (self):
        expected_slice = [
            0.0636, 0.8964, -0.6234,
            -1.0131, 0.0844, 0.4935,
            0.3437, 0.0911, -0.2957,
        ]
        super().test_output(UpperCamelCase__ )
class UpperCamelCase__ ( __UpperCAmelCase, unittest.TestCase ):
    """Output-value test for CrossAttnDownBlockaD via the shared UNet-block mixin.

    NOTE(review): automated renaming collapsed distinct identifiers here --
    the two class attributes overwrite each other, the same-named methods
    shadow one another, and `init_dict`/`inputs_dict` in the return are
    undefined; reconcile with the original test file before running.
    """
    _SCREAMING_SNAKE_CASE = CrossAttnDownBlockaD # noqa F405
    _SCREAMING_SNAKE_CASE = """down"""
    def SCREAMING_SNAKE_CASE__ ( self : Any ):
        # Presumably builds (init_dict, inputs_dict) with cross-attention dim 32,
        # but the results are bound to throwaway names -- TODO confirm.
        lowerCAmelCase_ ,lowerCAmelCase_ : int = super().prepare_init_args_and_inputs_for_common()
        lowerCAmelCase_ : List[Any] = 3_2
        return init_dict, inputs_dict
    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
        # Expected output slice for the block under test.
        lowerCAmelCase_ : Optional[Any] = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83]
        super().test_output(UpperCamelCase__ )
class UpperCamelCase__ ( __UpperCAmelCase, unittest.TestCase ):
    """Output-value test for SimpleCrossAttnDownBlockaD (skipped on MPS).

    NOTE(review): automated renaming collapsed distinct identifiers here --
    the two class attributes overwrite each other, the property and both
    methods share one name (later definitions shadow earlier ones), and
    `init_dict`/`inputs_dict` in the return are undefined; reconcile with the
    original test file before running.
    """
    _SCREAMING_SNAKE_CASE = SimpleCrossAttnDownBlockaD # noqa F405
    _SCREAMING_SNAKE_CASE = """down"""
    @property
    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
        # Dummy input including encoder hidden states.
        return super().get_dummy_input(include_encoder_hidden_states=UpperCamelCase__ )
    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
        # Presumably builds (init_dict, inputs_dict) with cross-attention dim 32.
        lowerCAmelCase_ ,lowerCAmelCase_ : str = super().prepare_init_args_and_inputs_for_common()
        lowerCAmelCase_ : Optional[Any] = 3_2
        return init_dict, inputs_dict
    @unittest.skipIf(torch_device == 'mps' , 'MPS result is not consistent' )
    def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
        # Expected output slice for the block under test.
        lowerCAmelCase_ : Optional[int] = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38]
        super().test_output(UpperCamelCase__ )
class UpperCamelCase__ ( __UpperCAmelCase, unittest.TestCase ):
    """Output-slice regression test for SkipDownBlockaD."""

    # Both class attributes share one (machine-rewritten) name, so the second
    # assignment overwrites the first at class-creation time.
    _SCREAMING_SNAKE_CASE = SkipDownBlockaD  # noqa F405
    _SCREAMING_SNAKE_CASE = """down"""

    # NOTE: this property and the test method below also share a single
    # (machine-rewritten) name, so the later definition replaces this one in
    # the class namespace.
    @property
    def SCREAMING_SNAKE_CASE__ (self):
        return super().get_dummy_input(include_skip_sample=UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ (self):
        expected_slice = [
            -0.0845, -0.2087, -0.2465,
            0.0971, 0.1900, -0.0484,
            0.2664, 0.4179, 0.5069,
        ]
        super().test_output(UpperCamelCase__ )
class UpperCamelCase__ ( __UpperCAmelCase, unittest.TestCase ):
    """Output-slice regression test for AttnSkipDownBlockaD."""

    # Both class attributes share one (machine-rewritten) name, so the second
    # assignment overwrites the first at class-creation time.
    _SCREAMING_SNAKE_CASE = AttnSkipDownBlockaD  # noqa F405
    _SCREAMING_SNAKE_CASE = """down"""

    # NOTE: this property and the test method below also share a single
    # (machine-rewritten) name, so the later definition replaces this one in
    # the class namespace.
    @property
    def SCREAMING_SNAKE_CASE__ (self):
        return super().get_dummy_input(include_skip_sample=UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ (self):
        expected_slice = [
            0.5539, 0.1609, 0.4924,
            0.0537, -0.1995, 0.4050,
            0.0979, -0.2721, -0.0642,
        ]
        super().test_output(UpperCamelCase__ )
class UpperCamelCase__ ( __UpperCAmelCase, unittest.TestCase ):
    """Output-value test for DownEncoderBlockaD (no time embedding).

    NOTE(review): automated renaming collapsed distinct identifiers here --
    the two class attributes overwrite each other, the property and both
    methods share one name (later definitions shadow earlier ones), and
    `init_dict`/`inputs_dict` in the return are undefined; reconcile with the
    original test file before running.
    """
    _SCREAMING_SNAKE_CASE = DownEncoderBlockaD # noqa F405
    _SCREAMING_SNAKE_CASE = """down"""
    @property
    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
        # Dummy input including the time embedding flag.
        return super().get_dummy_input(include_temb=UpperCamelCase__ )
    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
        # Presumably builds (init_dict, inputs_dict) for a 32->32 channel block.
        lowerCAmelCase_ : int = {
            'in_channels': 3_2,
            'out_channels': 3_2,
        }
        lowerCAmelCase_ : Optional[int] = self.dummy_input
        return init_dict, inputs_dict
    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
        # Expected output slice for the block under test.
        lowerCAmelCase_ : int = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26]
        super().test_output(UpperCamelCase__ )
class UpperCamelCase__ ( __UpperCAmelCase, unittest.TestCase ):
    """Output-value test for AttnDownEncoderBlockaD (no time embedding).

    NOTE(review): automated renaming collapsed distinct identifiers here --
    the two class attributes overwrite each other, the property and both
    methods share one name (later definitions shadow earlier ones), and
    `init_dict`/`inputs_dict` in the return are undefined; reconcile with the
    original test file before running.
    """
    _SCREAMING_SNAKE_CASE = AttnDownEncoderBlockaD # noqa F405
    _SCREAMING_SNAKE_CASE = """down"""
    @property
    def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
        # Dummy input including the time embedding flag.
        return super().get_dummy_input(include_temb=UpperCamelCase__ )
    def SCREAMING_SNAKE_CASE__ ( self : int ):
        # Presumably builds (init_dict, inputs_dict) for a 32->32 channel block.
        lowerCAmelCase_ : str = {
            'in_channels': 3_2,
            'out_channels': 3_2,
        }
        lowerCAmelCase_ : Union[str, Any] = self.dummy_input
        return init_dict, inputs_dict
    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
        # Expected output slice for the block under test.
        lowerCAmelCase_ : Optional[int] = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38]
        super().test_output(UpperCamelCase__ )
class UpperCamelCase__ ( __UpperCAmelCase, unittest.TestCase ):
    """Output-value test for UNetMidBlockaD.

    NOTE(review): automated renaming collapsed distinct identifiers here --
    the two class attributes overwrite each other, the two same-named methods
    shadow one another, and `init_dict`/`inputs_dict` in the return are
    undefined; reconcile with the original test file before running.
    """
    _SCREAMING_SNAKE_CASE = UNetMidBlockaD # noqa F405
    _SCREAMING_SNAKE_CASE = """mid"""
    def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
        # Presumably builds (init_dict, inputs_dict) for a 32-channel mid block.
        lowerCAmelCase_ : List[Any] = {
            'in_channels': 3_2,
            'temb_channels': 1_2_8,
        }
        lowerCAmelCase_ : Dict = self.dummy_input
        return init_dict, inputs_dict
    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
        # Expected output slice for the block under test.
        lowerCAmelCase_ : Union[str, Any] = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28]
        super().test_output(UpperCamelCase__ )
class UpperCamelCase__ ( __UpperCAmelCase, unittest.TestCase ):
    """Output-value test for UNetMidBlockaDCrossAttn.

    NOTE(review): automated renaming collapsed distinct identifiers here --
    the two class attributes overwrite each other, the two same-named methods
    shadow one another, and `init_dict`/`inputs_dict` in the return are
    undefined; reconcile with the original test file before running.
    """
    _SCREAMING_SNAKE_CASE = UNetMidBlockaDCrossAttn # noqa F405
    _SCREAMING_SNAKE_CASE = """mid"""
    def SCREAMING_SNAKE_CASE__ ( self : Any ):
        # Presumably builds (init_dict, inputs_dict) with cross-attention dim 32.
        lowerCAmelCase_ ,lowerCAmelCase_ : Any = super().prepare_init_args_and_inputs_for_common()
        lowerCAmelCase_ : str = 3_2
        return init_dict, inputs_dict
    def SCREAMING_SNAKE_CASE__ ( self : Any ):
        # Expected output slice for the block under test.
        lowerCAmelCase_ : Union[str, Any] = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35]
        super().test_output(UpperCamelCase__ )
class UpperCamelCase__ ( __UpperCAmelCase, unittest.TestCase ):
    """Output-value test for UNetMidBlockaDSimpleCrossAttn.

    NOTE(review): automated renaming collapsed distinct identifiers here --
    the two class attributes overwrite each other, the property and both
    methods share one name (later definitions shadow earlier ones), and
    `init_dict`/`inputs_dict` in the return are undefined; reconcile with the
    original test file before running.
    """
    _SCREAMING_SNAKE_CASE = UNetMidBlockaDSimpleCrossAttn # noqa F405
    _SCREAMING_SNAKE_CASE = """mid"""
    @property
    def SCREAMING_SNAKE_CASE__ ( self : int ):
        # Dummy input including encoder hidden states.
        return super().get_dummy_input(include_encoder_hidden_states=UpperCamelCase__ )
    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
        # Presumably builds (init_dict, inputs_dict) with cross-attention dim 32.
        lowerCAmelCase_ ,lowerCAmelCase_ : Tuple = super().prepare_init_args_and_inputs_for_common()
        lowerCAmelCase_ : Union[str, Any] = 3_2
        return init_dict, inputs_dict
    def SCREAMING_SNAKE_CASE__ ( self : int ):
        # Expected output slice for the block under test.
        lowerCAmelCase_ : Optional[Any] = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80]
        super().test_output(UpperCamelCase__ )
class UpperCamelCase__ ( __UpperCAmelCase, unittest.TestCase ):
    """Output-slice regression test for UpBlockaD."""

    # Both class attributes share one (machine-rewritten) name, so the second
    # assignment overwrites the first at class-creation time.
    _SCREAMING_SNAKE_CASE = UpBlockaD  # noqa F405
    _SCREAMING_SNAKE_CASE = """up"""

    # NOTE: this property and the test method below also share a single
    # (machine-rewritten) name, so the later definition replaces this one in
    # the class namespace.
    @property
    def SCREAMING_SNAKE_CASE__ (self):
        return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ (self):
        expected_slice = [
            -0.2041, -0.4165, -0.3022,
            0.0041, -0.6628, -0.7053,
            0.1928, -0.0325, 0.0523,
        ]
        super().test_output(UpperCamelCase__ )
class UpperCamelCase__ ( __UpperCAmelCase, unittest.TestCase ):
    """Output-slice regression test for ResnetUpsampleBlockaD."""

    # Both class attributes share one (machine-rewritten) name, so the second
    # assignment overwrites the first at class-creation time.
    _SCREAMING_SNAKE_CASE = ResnetUpsampleBlockaD  # noqa F405
    _SCREAMING_SNAKE_CASE = """up"""

    # NOTE: this property and the test method below also share a single
    # (machine-rewritten) name, so the later definition replaces this one in
    # the class namespace.
    @property
    def SCREAMING_SNAKE_CASE__ (self):
        return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ (self):
        expected_slice = [
            0.2287, 0.3549, -0.1346,
            0.4797, -0.1715, -0.9649,
            0.7305, -0.5864, -0.6244,
        ]
        super().test_output(UpperCamelCase__ )
class UpperCamelCase__ ( __UpperCAmelCase, unittest.TestCase ):
    """Output-value test for CrossAttnUpBlockaD.

    NOTE(review): automated renaming collapsed distinct identifiers here --
    the two class attributes overwrite each other, the property and both
    methods share one name (later definitions shadow earlier ones), and
    `init_dict`/`inputs_dict` in the return are undefined; reconcile with the
    original test file before running.
    """
    _SCREAMING_SNAKE_CASE = CrossAttnUpBlockaD # noqa F405
    _SCREAMING_SNAKE_CASE = """up"""
    @property
    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
        # Dummy input including residual hidden states.
        return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase__ )
    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
        # Presumably builds (init_dict, inputs_dict) with cross-attention dim 32.
        lowerCAmelCase_ ,lowerCAmelCase_ : Union[str, Any] = super().prepare_init_args_and_inputs_for_common()
        lowerCAmelCase_ : Optional[int] = 3_2
        return init_dict, inputs_dict
    def SCREAMING_SNAKE_CASE__ ( self : str ):
        # Expected output slice for the block under test.
        lowerCAmelCase_ : Optional[int] = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82]
        super().test_output(UpperCamelCase__ )
class UpperCamelCase__ ( __UpperCAmelCase, unittest.TestCase ):
    """Output-value test for SimpleCrossAttnUpBlockaD.

    NOTE(review): automated renaming collapsed distinct identifiers here --
    the two class attributes overwrite each other, the property and both
    methods share one name (later definitions shadow earlier ones), and
    `init_dict`/`inputs_dict` in the return are undefined; reconcile with the
    original test file before running.
    """
    _SCREAMING_SNAKE_CASE = SimpleCrossAttnUpBlockaD # noqa F405
    _SCREAMING_SNAKE_CASE = """up"""
    @property
    def SCREAMING_SNAKE_CASE__ ( self : Any ):
        # Dummy input including residual and encoder hidden states.
        return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase__ , include_encoder_hidden_states=UpperCamelCase__ )
    def SCREAMING_SNAKE_CASE__ ( self : str ):
        # Presumably builds (init_dict, inputs_dict) with cross-attention dim 32.
        lowerCAmelCase_ ,lowerCAmelCase_ : Tuple = super().prepare_init_args_and_inputs_for_common()
        lowerCAmelCase_ : Any = 3_2
        return init_dict, inputs_dict
    def SCREAMING_SNAKE_CASE__ ( self : int ):
        # Expected output slice for the block under test.
        lowerCAmelCase_ : Optional[int] = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02]
        super().test_output(UpperCamelCase__ )
class UpperCamelCase__ ( __UpperCAmelCase, unittest.TestCase ):
    """Output-slice regression test for AttnUpBlockaD (skipped on MPS)."""

    # Both class attributes share one (machine-rewritten) name, so the second
    # assignment overwrites the first at class-creation time.
    _SCREAMING_SNAKE_CASE = AttnUpBlockaD  # noqa F405
    _SCREAMING_SNAKE_CASE = """up"""

    # NOTE: this property and the test method below also share a single
    # (machine-rewritten) name, so the later definition replaces this one in
    # the class namespace.
    @property
    def SCREAMING_SNAKE_CASE__ (self):
        return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase__ )

    @unittest.skipIf(torch_device == 'mps', 'MPS result is not consistent')
    def SCREAMING_SNAKE_CASE__ (self):
        expected_slice = [
            0.0979, 0.1326, 0.0021,
            0.0659, 0.2249, 0.0059,
            0.1132, 0.5952, 0.1033,
        ]
        super().test_output(UpperCamelCase__ )
class UpperCamelCase__ ( __UpperCAmelCase, unittest.TestCase ):
    """Output-slice regression test for SkipUpBlockaD."""

    # Both class attributes share one (machine-rewritten) name, so the second
    # assignment overwrites the first at class-creation time.
    _SCREAMING_SNAKE_CASE = SkipUpBlockaD  # noqa F405
    _SCREAMING_SNAKE_CASE = """up"""

    # NOTE: this property and the test method below also share a single
    # (machine-rewritten) name, so the later definition replaces this one in
    # the class namespace.
    @property
    def SCREAMING_SNAKE_CASE__ (self):
        return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ (self):
        expected_slice = [
            -0.0893, -0.1234, -0.1506,
            -0.0332, 0.0123, -0.0211,
            0.0566, 0.0143, 0.0362,
        ]
        super().test_output(UpperCamelCase__ )
class UpperCamelCase__ ( __UpperCAmelCase, unittest.TestCase ):
    """Output-slice regression test for AttnSkipUpBlockaD."""

    # Both class attributes share one (machine-rewritten) name, so the second
    # assignment overwrites the first at class-creation time.
    _SCREAMING_SNAKE_CASE = AttnSkipUpBlockaD  # noqa F405
    _SCREAMING_SNAKE_CASE = """up"""

    # NOTE: this property and the test method below also share a single
    # (machine-rewritten) name, so the later definition replaces this one in
    # the class namespace.
    @property
    def SCREAMING_SNAKE_CASE__ (self):
        return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ (self):
        expected_slice = [
            0.0361, 0.0617, 0.2787,
            -0.0350, 0.0342, 0.3421,
            -0.0843, 0.0913, 0.3015,
        ]
        super().test_output(UpperCamelCase__ )
class UpperCamelCase__ ( __UpperCAmelCase, unittest.TestCase ):
    """Output-value test for UpDecoderBlockaD (no time embedding).

    NOTE(review): automated renaming collapsed distinct identifiers here --
    the two class attributes overwrite each other, the property and both
    methods share one name (later definitions shadow earlier ones), and
    `init_dict`/`inputs_dict` in the return are undefined; reconcile with the
    original test file before running.
    """
    _SCREAMING_SNAKE_CASE = UpDecoderBlockaD # noqa F405
    _SCREAMING_SNAKE_CASE = """up"""
    @property
    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
        # Dummy input including the time embedding flag.
        return super().get_dummy_input(include_temb=UpperCamelCase__ )
    def SCREAMING_SNAKE_CASE__ ( self : str ):
        # Presumably builds (init_dict, inputs_dict) for a 32->32 channel block.
        lowerCAmelCase_ : List[str] = {'in_channels': 3_2, 'out_channels': 3_2}
        lowerCAmelCase_ : Dict = self.dummy_input
        return init_dict, inputs_dict
    def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
        # Expected output slice for the block under test.
        lowerCAmelCase_ : List[str] = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37]
        super().test_output(UpperCamelCase__ )
class UpperCamelCase__ ( __UpperCAmelCase, unittest.TestCase ):
    """Output-value test for AttnUpDecoderBlockaD (no time embedding).

    NOTE(review): automated renaming collapsed distinct identifiers here --
    the two class attributes overwrite each other, the property and both
    methods share one name (later definitions shadow earlier ones), and
    `init_dict`/`inputs_dict` in the return are undefined; reconcile with the
    original test file before running.
    """
    _SCREAMING_SNAKE_CASE = AttnUpDecoderBlockaD # noqa F405
    _SCREAMING_SNAKE_CASE = """up"""
    @property
    def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
        # Dummy input including the time embedding flag.
        return super().get_dummy_input(include_temb=UpperCamelCase__ )
    def SCREAMING_SNAKE_CASE__ ( self : str ):
        # Presumably builds (init_dict, inputs_dict) for a 32->32 channel block.
        lowerCAmelCase_ : Tuple = {'in_channels': 3_2, 'out_channels': 3_2}
        lowerCAmelCase_ : Optional[Any] = self.dummy_input
        return init_dict, inputs_dict
    def SCREAMING_SNAKE_CASE__ ( self : int ):
        # Expected output slice for the block under test.
        lowerCAmelCase_ : int = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68]
        super().test_output(UpperCamelCase__ )
| 224 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def __UpperCamelCase ( _A ):
    """Remove fairseq-specific keys from a checkpoint state dict, in place.

    Args:
        _A: The state dict (mapping) to clean; absent keys are ignored.

    Fix: the original body assigned the key list to a throwaway name while
    iterating an undefined ``ignore_keys``, and popped the wrong key.
    """
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        # pop with a default so a missing key does not raise
        _A.pop(k, None)
def __UpperCamelCase ( _A ):
    """Build a bias-free output-projection ``nn.Linear`` tied to an embedding.

    Args:
        _A: An ``nn.Embedding`` whose weight is (vocab_size, emb_size).

    Returns:
        An ``nn.Linear`` sharing the embedding's weight tensor (no copy).

    Fix: the original body referenced undefined names (``emb``,
    ``lin_layer``) and discarded every intermediate result.
    """
    vocab_size, emb_size = _A.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Tie the projection to the embedding weights by sharing the tensor.
    lin_layer.weight.data = _A.weight.data
    return lin_layer
def __UpperCamelCase ( _A ):
    """Convert a fairseq XGLM checkpoint (path in ``_A``) into an HF
    ``XGLMForCausalLM`` and return it.

    NOTE(review): automated renaming bound every intermediate to a throwaway
    name, so later references (`checkpoint`, `state_dict`, `args`, `model`,
    `remove_ignore_keys_`) no longer resolve -- reconcile with the original
    conversion script before running.
    """
    # Load the raw fairseq checkpoint on CPU.
    lowerCAmelCase_ = torch.load(_A , map_location='''cpu''' )
    lowerCAmelCase_ = Namespace(**checkpoint['''cfg''']['''model'''] )
    lowerCAmelCase_ = checkpoint['''model''']
    # Strip fairseq-only keys before remapping.
    remove_ignore_keys_(_A )
    lowerCAmelCase_ = state_dict['''decoder.embed_tokens.weight'''].shape[0]
    # fairseq uses `decoder.*` keys; HF XGLM expects `model.*`.
    lowerCAmelCase_ = {key.replace('''decoder''' , '''model''' ): val for key, val in state_dict.items()}
    lowerCAmelCase_ = XGLMConfig(
        vocab_size=_A , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''gelu''' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
    lowerCAmelCase_ = XGLMForCausalLM(_A )
    lowerCAmelCase_ = model.load_state_dict(_A , strict=_A )
    print(_A )
    # Tie the LM head to the token embeddings.
    lowerCAmelCase_ = make_linear_from_emb(model.model.embed_tokens )
    return model
if __name__ == "__main__":
    # CLI entry point for the XGLM conversion.
    # Fix: the parser, the parsed namespace, and the converted model were all
    # assigned to a placeholder name (`_A`) while later lines referenced
    # `parser`/`args`/`model`, which were therefore undefined.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
    parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    # NOTE(review): the conversion routine above was machine-renamed, so
    # `convert_fairseq_xglm_checkpoint_from_disk` does not resolve in this
    # file as-is -- confirm the intended callee before running.
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 278 | 0 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
# Module-level logger. Fix: the functions below log via the name `logger`,
# which the machine renaming left unbound; bind both names so those call
# sites resolve while the renamed alias stays available.
UpperCamelCase = logger = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase_ :
    """Arguments controlling which pretrained model/config/tokenizer to load
    and whether to freeze the encoder / embeddings.

    NOTE(review): automated renaming collapsed every field name onto
    ``UpperCamelCase_`` and scrambled the annotations, so only the last
    declaration survives as a dataclass field; the ``help`` metadata below
    still documents each intended field -- reconstruct the real names
    (model path, config name, tokenizer name, cache dir, freeze flags) from
    the original script.
    """
    UpperCamelCase_ : int = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    UpperCamelCase_ : Dict = field(
        default=__UpperCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    UpperCamelCase_ : List[str] = field(
        default=__UpperCAmelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    UpperCamelCase_ : Any = field(
        default=__UpperCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
    UpperCamelCase_ : Tuple = field(default=__UpperCAmelCase , metadata={"""help""": """Whether tp freeze the encoder."""} )
    UpperCamelCase_ : Tuple = field(default=__UpperCAmelCase , metadata={"""help""": """Whether to freeze the embeddings."""} )
@dataclass
class lowerCAmelCase_ :
    """Arguments describing the dataset, sequence-length limits, example
    counts, translation language pair and generation settings.

    NOTE(review): automated renaming collapsed every field name onto
    ``UpperCamelCase_`` and scrambled the annotations, so only the last
    declaration survives as a dataclass field; the ``help`` metadata below
    still documents each intended field -- reconstruct the real names from
    the original script.
    """
    UpperCamelCase_ : Union[str, Any] = field(
        metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
    UpperCamelCase_ : List[Any] = field(
        default="""summarization""" , metadata={"""help""": """Task name, summarization (or summarization_{dataset} for pegasus) or translation"""} , )
    UpperCamelCase_ : Dict = field(
        default=1024 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    UpperCamelCase_ : str = field(
        default=128 , metadata={
            """help""": (
                """The maximum total sequence length for target text after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    UpperCamelCase_ : Union[str, Any] = field(
        default=142 , metadata={
            """help""": (
                """The maximum total sequence length for validation target text after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded. """
                """This argument is also used to override the ``max_length`` param of ``model.generate``, which is used """
                """during ``evaluate`` and ``predict``."""
            )
        } , )
    UpperCamelCase_ : Tuple = field(
        default=142 , metadata={
            """help""": (
                """The maximum total sequence length for test target text after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    UpperCamelCase_ : Dict = field(default=-1 , metadata={"""help""": """# training examples. -1 means use all."""} )
    UpperCamelCase_ : Tuple = field(default=-1 , metadata={"""help""": """# validation examples. -1 means use all."""} )
    UpperCamelCase_ : List[Any] = field(default=-1 , metadata={"""help""": """# test examples. -1 means use all."""} )
    UpperCamelCase_ : Dict = field(default=__UpperCAmelCase , metadata={"""help""": """Source language id for translation."""} )
    UpperCamelCase_ : List[Any] = field(default=__UpperCAmelCase , metadata={"""help""": """Target language id for translation."""} )
    UpperCamelCase_ : Optional[int] = field(default=__UpperCAmelCase , metadata={"""help""": """# num_beams to use for evaluation."""} )
    UpperCamelCase_ : Union[str, Any] = field(
        default=__UpperCAmelCase , metadata={"""help""": """If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."""} , )
def SCREAMING_SNAKE_CASE(split, metrics, output_dir) -> None:
    """Log metrics for one split and persist them as ``{split}_results.json``.

    Args:
        split: Dataset split name (e.g. "train", "val", "test").
        metrics: Mapping of metric name to value.
        output_dir: Directory in which the JSON file is written.

    Fix: the original signature repeated one parameter name three times
    (a SyntaxError) while the body referenced `split`/`metrics`, which were
    therefore unbound; the parameters are now named to match their use.
    """
    logger.info(F"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(F""" {key} = {metrics[key]}""" )
    # save_json is provided by the local `utils` module imported at file top.
    save_json(metrics, os.path.join(output_dir, F"""{split}_results.json""" ) )
def SCREAMING_SNAKE_CASE( ) -> Dict:
    """Train / evaluate / predict a seq2seq model with ``SeqaSeqTrainer`` and
    return a dict aggregating all split metrics.

    NOTE(review): automated renaming collapsed most locals onto `A` and left
    unresolved references throughout (`parser`, `model_args`, `data_args`,
    `training_args`, `config`, `tokenizer`, `model`, `trainer`, `metrics`,
    `all_metrics`, ...); the `A , A , A: ... = ...` unpackings are also not
    valid Python (annotated tuple targets). Reconcile with the original
    finetune script before running.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    A: Any = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        A , A , A: Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        A , A , A: Union[str, Any] = parser.parse_args_into_dataclasses()
    check_output_dir(_A )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
    logger.info('''Training/evaluation parameters %s''' , _A )
    # Set seed
    set_seed(training_args.seed )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    A: Optional[int] = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    # Copy dropout/layerdrop overrides from training args onto the config.
    A: Optional[int] = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
    for p in extra_model_params:
        if getattr(_A , _A , _A ):
            assert hasattr(_A , _A ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
            setattr(_A , _A , getattr(_A , _A ) )
    A: str = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    A: Any = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=_A , cache_dir=model_args.cache_dir , )
    # use task specific params
    use_task_specific_params(_A , data_args.task )
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        A: Union[str, Any] = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(_A , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(_A , _A ):
            A: Dict = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            A: Dict = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
    # Optionally freeze embeddings and/or the whole encoder.
    if model_args.freeze_embeds:
        freeze_embeds(_A )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )
    A: Optional[Any] = SeqaSeqDataset
    # Get datasets
    A: str = (
        dataset_class(
            _A , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
        if training_args.do_train
        else None
    )
    A: Dict = (
        dataset_class(
            _A , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    A: Any = (
        dataset_class(
            _A , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    A: str = (
        build_compute_metrics_fn(data_args.task , _A ) if training_args.predict_with_generate else None
    )
    A: Optional[int] = SeqaSeqTrainer(
        model=_A , args=_A , data_args=_A , train_dataset=_A , eval_dataset=_A , data_collator=SeqaSeqDataCollator(
            _A , _A , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_A , tokenizer=_A , )
    A: str = {}
    # Training
    if training_args.do_train:
        logger.info('''*** Train ***''' )
        A: Optional[Any] = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        A: Any = train_result.metrics
        A: Tuple = data_args.n_train
        trainer.save_model() # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics('''train''' , _A , training_args.output_dir )
            all_metrics.update(_A )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        A: Dict = trainer.evaluate(metric_key_prefix='''val''' )
        A: int = data_args.n_val
        A: Optional[int] = round(metrics['''val_loss'''] , 4 )
        if trainer.is_world_process_zero():
            handle_metrics('''val''' , _A , training_args.output_dir )
            all_metrics.update(_A )
    if training_args.do_predict:
        logger.info('''*** Predict ***''' )
        A: Union[str, Any] = trainer.predict(test_dataset=_A , metric_key_prefix='''test''' )
        A: Optional[int] = test_output.metrics
        A: str = data_args.n_test
        if trainer.is_world_process_zero():
            A: Dict = round(metrics['''test_loss'''] , 4 )
            handle_metrics('''test''' , _A , training_args.output_dir )
            all_metrics.update(_A )
            if training_args.predict_with_generate:
                # Decode generated predictions and write them alongside metrics.
                A: Optional[int] = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=_A , clean_up_tokenization_spaces=_A )
                A: int = lmap(str.strip , _A )
                write_txt_file(_A , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
    if trainer.is_world_process_zero():
        save_json(_A , os.path.join(training_args.output_dir , '''all_results.json''' ) )
    return all_metrics
def SCREAMING_SNAKE_CASE( __lowercase ) -> str:
    """Entry point for ``xla_spawn`` (TPU): ignores the process index argument
    and runs the training routine.

    NOTE(review): `main` is not defined under that name in this file (the
    main routine above was machine-renamed), so this call will not resolve --
    confirm against the original script.
    """
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    # NOTE(review): `main` is not defined under that name in this file (the
    # main routine above was machine-renamed); confirm the intended entry
    # point before running.
    main()
| 319 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"

from pathlib import Path
import json
import tempfile

from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES

# Fix: every assignment below had its target obfuscated to `_A` while later
# statements still referenced the original names (vocab, tokenizer, ...).
# Restored the names the references demand so the script actually runs.
mname_tiny = '''tiny-wmt19-en-ru'''

# Build
# borrowed from a test
vocab = [
    '''l''',
    '''o''',
    '''w''',
    '''e''',
    '''r''',
    '''s''',
    '''t''',
    '''i''',
    '''d''',
    '''n''',
    '''w</w>''',
    '''r</w>''',
    '''t</w>''',
    '''lo''',
    '''low''',
    '''er</w>''',
    '''low</w>''',
    '''lowest</w>''',
    '''newer</w>''',
    '''wider</w>''',
    '''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
    merges_file = build_dir / VOCAB_FILES_NAMES['''merges_file''']
    with open(src_vocab_file, '''w''') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, '''w''') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, '''w''') as fp:
        fp.write('''\n'''.join(merges))

    # Tokenizer must be built while the temp files still exist.
    tokenizer = FSMTTokenizer(
        langs=['''en''', '''ru'''],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

# NOTE(review): config langs order (ru, en) differs from the tokenizer's
# (en, ru) in the original - preserved as-is; confirm intent.
config = FSMTConfig(
    langs=['''ru''', '''en'''],
    src_vocab_size=1_000,
    tgt_vocab_size=1_000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
outputs = tiny_model(**batch)

print('''test output:''', len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 278 | 0 |
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
__A = 4
__A = 3
class UpperCAmelCase (__UpperCAmelCase ):
    """Error raised when the distributed-split sanity check below fails.

    NOTE(review): the base class ``__UpperCAmelCase`` is not defined in this
    file - obfuscation residue; presumably it was an exception type. Confirm.
    """

    pass
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , num_items_per_shard=3 ):
    """Yield ``{"i": item_index, "shard": shard_name}`` dicts for every shard.

    __UpperCAmelCase: iterable of shard names.
    num_items_per_shard: how many items each shard yields (default 3, the
        value the module-level constant was set to before obfuscation).

    Fixes: the body iterated an undefined name ``shards`` instead of the
    parameter, ``range(_A)`` referenced an undefined ``_A``, and the
    ``-> Dict`` annotation used ``Dict`` without importing it.
    """
    for shard in __UpperCAmelCase:
        for i in range(num_items_per_shard):
            yield {"i": i, "shard": shard}
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]:
    """Per-process entry point: build a sharded dataset, split it by node, and
    verify each rank receives the expected number of items.

    NOTE(review): assignment targets were obfuscated to ``lowercase__`` while
    later statements reference the original names (``parser``, ``streaming``,
    ``dataloader``, ...), and ``_A`` is undefined - the function cannot run as
    written. Comments describe the evident intent only.
    """
    # Distributed identity comes from torchrun-style environment variables.
    lowercase__: Tuple = int(os.environ['''RANK'''] )
    lowercase__: Tuple = int(os.environ['''WORLD_SIZE'''] )
    lowercase__: Union[str, Any] = ArgumentParser()
    # `type=_A` below is obfuscation residue - original argument types lost.
    parser.add_argument('''--streaming''' , type=_A )
    parser.add_argument('''--local_rank''' , type=_A )
    parser.add_argument('''--num_workers''' , type=_A , default=0 )
    lowercase__: Tuple = parser.parse_args()
    lowercase__: int = args.streaming
    lowercase__: Tuple = args.num_workers
    # One generator shard name per expected shard.
    lowercase__: Optional[int] = {'''shards''': [F"""shard_{shard_idx}""" for shard_idx in range(_A )]}
    lowercase__: Tuple = IterableDataset.from_generator(_A , gen_kwargs=_A )
    if not streaming:
        # Non-streaming mode materializes the generator into a map-style Dataset.
        lowercase__: str = Dataset.from_list(list(_A ) )
    # Each node keeps only its own slice of the dataset.
    lowercase__: Tuple = split_dataset_by_node(_A , rank=_A , world_size=_A )
    lowercase__: Optional[int] = torch.utils.data.DataLoader(_A , num_workers=_A )
    lowercase__: str = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    # Ranks below (full_size % world_size) receive one extra item.
    lowercase__: Union[str, Any] = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size) )
    lowercase__: Optional[Any] = sum(1 for _ in dataloader )
    if local_size != expected_local_size:
        raise FailedTestError(F"""local_size {local_size} != expected_local_size {expected_local_size}""" )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined under this name in this file (the
    # entry point above was renamed by obfuscation) - confirm before running.
    main()
| 177 |
import argparse
from collections import defaultdict
import yaml
_A = '''docs/source/en/_toctree.yml'''
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = defaultdict(_A )
for doc in model_doc:
counts[doc["local"]] += 1
lowerCAmelCase_ = [key for key, value in counts.items() if value > 1]
lowerCAmelCase_ = []
for duplicate_key in duplicates:
lowerCAmelCase_ = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
if len(_A ) > 1:
raise ValueError(
f"{duplicate_key} is present several times in the documentation table of content at "
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
# Sort
return sorted(_A , key=lambda _A : s["title"].lower() )
def __UpperCamelCase ( _A=False ):
    """Check (and optionally rewrite) the model-doc part of the toc file.

    _A: presumably an ``overwrite`` flag - when True, fix the file in place;
    when False, raise if the toc is not properly sorted. TODO confirm.

    NOTE(review): assignment targets were obfuscated to ``lowerCAmelCase_``
    while references keep the original names (``content``, ``api_doc``, ...),
    and ``open(_A, ...)`` opens the boolean flag instead of the toc path -
    broken as written. Comments describe the evident intent only.
    """
    with open(_A , encoding='''utf-8''' ) as f:
        lowerCAmelCase_ = yaml.safe_load(f.read() )

    # Get to the API doc
    lowerCAmelCase_ = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    lowerCAmelCase_ = content[api_idx]['''sections''']

    # Then to the model doc
    lowerCAmelCase_ = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1

    lowerCAmelCase_ = api_doc[model_idx]['''sections''']

    # Only sub-sections that themselves have "sections" are modality groups.
    lowerCAmelCase_ = [(idx, section) for idx, section in enumerate(_A ) if '''sections''' in section]
    lowerCAmelCase_ = False
    for idx, modality_doc in modalities_docs:
        lowerCAmelCase_ = modality_doc['''sections''']
        lowerCAmelCase_ = clean_model_doc_toc(_A )

        if old_modality_doc != new_modality_doc:
            lowerCAmelCase_ = True
            if overwrite:
                lowerCAmelCase_ = new_modality_doc

    if diff:
        if overwrite:
            # Write the cleaned structure back into the loaded document tree.
            lowerCAmelCase_ = model_doc
            lowerCAmelCase_ = api_doc
            with open(_A , '''w''' , encoding='''utf-8''' ) as f:
                f.write(yaml.dump(_A , allow_unicode=_A ) )
        else:
            raise ValueError(
                '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
    # Fix: the parser and the parsed namespace were assigned to `_A` while the
    # following lines referenced `parser` / `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()

    # NOTE(review): `check_model_doc` is the checker defined above (its def was
    # renamed by obfuscation) - confirm the name before running.
    check_model_doc(args.fix_and_overwrite)
| 278 | 0 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class __snake_case ( __UpperCAmelCase ):
    """Tests for ``FeaturesManager.determine_framework`` (pt/tf detection).

    NOTE(review): obfuscation damage throughout - results are bound to the
    throwaway name ``SCREAMING_SNAKE_CASE__``, helper parameters are declared
    as ``_lowercase`` but bodies reference ``UpperCamelCase__``, and the base
    class ``__UpperCAmelCase`` is undefined. Comments describe intent only.
    """

    # setUp: remember the tiny test checkpoint and the two framework tags.
    def __a ( self : Dict ):
        """Store the test model identifier and the "pt"/"tf" framework names."""
        SCREAMING_SNAKE_CASE__ = SMALL_MODEL_IDENTIFIER
        SCREAMING_SNAKE_CASE__ = """pt"""
        SCREAMING_SNAKE_CASE__ = """tf"""

    # Helper: save a PyTorch checkpoint of the test model into a directory.
    def __a ( self : List[Any] , _lowercase : Any ):
        """Save a PyTorch checkpoint of ``self.test_model`` to the given path."""
        SCREAMING_SNAKE_CASE__ = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(UpperCamelCase__ )

    # Helper: save a TensorFlow checkpoint (converted from the PT weights).
    def __a ( self : Tuple , _lowercase : Tuple ):
        """Save a TensorFlow checkpoint of ``self.test_model`` to the given path."""
        SCREAMING_SNAKE_CASE__ = TFAutoModel.from_pretrained(self.test_model , from_pt=UpperCamelCase__ )
        model_tf.save_pretrained(UpperCamelCase__ )

    def __a ( self : str ):
        """A user-provided framework must be returned verbatim, for hub ids and
        for local PT/TF checkpoints alike."""
        SCREAMING_SNAKE_CASE__ = """mock_framework"""

        # Framework provided - return whatever the user provides
        SCREAMING_SNAKE_CASE__ = FeaturesManager.determine_framework(self.test_model , UpperCamelCase__ )
        self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(UpperCamelCase__ )
            SCREAMING_SNAKE_CASE__ = FeaturesManager.determine_framework(UpperCamelCase__ , UpperCamelCase__ )
            self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(UpperCamelCase__ )
            SCREAMING_SNAKE_CASE__ = FeaturesManager.determine_framework(UpperCamelCase__ , UpperCamelCase__ )
            self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )

    def __a ( self : List[Any] ):
        """Without an explicit framework, detection follows the checkpoint type;
        an invalid local checkpoint must raise."""
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(UpperCamelCase__ )
            SCREAMING_SNAKE_CASE__ = FeaturesManager.determine_framework(UpperCamelCase__ )
            self.assertEqual(UpperCamelCase__ , self.framework_pt )

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(UpperCamelCase__ )
            SCREAMING_SNAKE_CASE__ = FeaturesManager.determine_framework(UpperCamelCase__ )
            self.assertEqual(UpperCamelCase__ , self.framework_tf )

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(UpperCamelCase__ ):
                SCREAMING_SNAKE_CASE__ = FeaturesManager.determine_framework(UpperCamelCase__ )

    def __a ( self : Any ):
        """Environment-based fallback: patch torch/tf availability and check
        which framework is picked (PT preferred; error when neither exists)."""
        SCREAMING_SNAKE_CASE__ = MagicMock(return_value=UpperCamelCase__ )
        with patch("""transformers.onnx.features.is_tf_available""" , UpperCamelCase__ ):
            SCREAMING_SNAKE_CASE__ = FeaturesManager.determine_framework(self.test_model )
        self.assertEqual(UpperCamelCase__ , self.framework_pt )

        # PyTorch not in environment -> use TensorFlow
        SCREAMING_SNAKE_CASE__ = MagicMock(return_value=UpperCamelCase__ )
        with patch("""transformers.onnx.features.is_torch_available""" , UpperCamelCase__ ):
            SCREAMING_SNAKE_CASE__ = FeaturesManager.determine_framework(self.test_model )
        self.assertEqual(UpperCamelCase__ , self.framework_tf )

        # Both in environment -> use PyTorch
        SCREAMING_SNAKE_CASE__ = MagicMock(return_value=UpperCamelCase__ )
        SCREAMING_SNAKE_CASE__ = MagicMock(return_value=UpperCamelCase__ )
        with patch("""transformers.onnx.features.is_tf_available""" , UpperCamelCase__ ), patch(
            """transformers.onnx.features.is_torch_available""" , UpperCamelCase__ ):
            SCREAMING_SNAKE_CASE__ = FeaturesManager.determine_framework(self.test_model )
        self.assertEqual(UpperCamelCase__ , self.framework_pt )

        # Both not in environment -> raise error
        SCREAMING_SNAKE_CASE__ = MagicMock(return_value=UpperCamelCase__ )
        SCREAMING_SNAKE_CASE__ = MagicMock(return_value=UpperCamelCase__ )
        with patch("""transformers.onnx.features.is_tf_available""" , UpperCamelCase__ ), patch(
            """transformers.onnx.features.is_torch_available""" , UpperCamelCase__ ):
            with self.assertRaises(UpperCamelCase__ ):
                SCREAMING_SNAKE_CASE__ = FeaturesManager.determine_framework(self.test_model )
| 219 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class A ( __UpperCAmelCase ):
    """Tests for ``UnCLIPScheduler``.

    NOTE(review): obfuscation damage - results are bound to the throwaway name
    ``lowerCAmelCase_`` while later lines reference the original names
    (``config``, ``scheduler``, ...), and loop variables are passed as the
    undefined ``UpperCamelCase__``. Comments describe intent only.
    """

    # Scheduler class(es) this common-test suite exercises.
    __snake_case = (UnCLIPScheduler,)

    def SCREAMING_SNAKE_CASE__ ( self, **UpperCamelCase__ ):
        """Return the default scheduler config dict merged with any overrides."""
        lowerCAmelCase_ = {
            '''num_train_timesteps''': 1000,
            '''variance_type''': '''fixed_small_log''',
            '''clip_sample''': True,
            '''clip_sample_range''': 1.0,
            '''prediction_type''': '''epsilon''',
        }

        config.update(**UpperCamelCase__ )
        return config

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Config sweep over several num_train_timesteps values."""
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Config sweep over both supported variance types."""
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Config sweep over clip_sample on/off."""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Config sweep over several clip_sample_range values."""
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Config sweep over both prediction types."""
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Forward sweep over (time_step, prev_timestep) pairs; a previous
        timestep at or after the current one is skipped as invalid."""
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=UpperCamelCase__, prev_timestep=UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Spot-check _get_variance values for variance_type=fixed_small_log."""
        lowerCAmelCase_ = self.scheduler_classes[0]
        lowerCAmelCase_ = self.get_scheduler_config(variance_type='''fixed_small_log''' )
        lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ )

        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1E-5

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Spot-check _get_variance values for variance_type=learned_range."""
        lowerCAmelCase_ = self.scheduler_classes[0]
        lowerCAmelCase_ = self.get_scheduler_config(variance_type='''learned_range''' )
        lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ )

        lowerCAmelCase_ = 0.5

        assert scheduler._get_variance(1, predicted_variance=UpperCamelCase__ ) - -10.1_712_790 < 1E-5
        assert scheduler._get_variance(487, predicted_variance=UpperCamelCase__ ) - -5.7_998_052 < 1E-5
        assert scheduler._get_variance(999, predicted_variance=UpperCamelCase__ ) - -0.0_010_011 < 1E-5

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Full denoising loop over the default timesteps; checks the sum and
        mean of the final sample against reference values."""
        lowerCAmelCase_ = self.scheduler_classes[0]
        lowerCAmelCase_ = self.get_scheduler_config()
        lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ )

        lowerCAmelCase_ = scheduler.timesteps

        lowerCAmelCase_ = self.dummy_model()
        lowerCAmelCase_ = self.dummy_sample_deter
        lowerCAmelCase_ = torch.manual_seed(0 )

        for i, t in enumerate(UpperCamelCase__ ):
            # 1. predict noise residual
            lowerCAmelCase_ = model(UpperCamelCase__, UpperCamelCase__ )

            # 2. predict previous mean of sample x_t-1
            lowerCAmelCase_ = scheduler.step(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, generator=UpperCamelCase__ ).prev_sample

            lowerCAmelCase_ = pred_prev_sample

        lowerCAmelCase_ = torch.sum(torch.abs(UpperCamelCase__ ) )
        lowerCAmelCase_ = torch.mean(torch.abs(UpperCamelCase__ ) )

        assert abs(result_sum.item() - 252.2_682_495 ) < 1E-2
        assert abs(result_mean.item() - 0.3_284_743 ) < 1E-3

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Denoising loop with a truncated 25-step schedule, passing an explicit
        prev_timestep (None on the final step)."""
        lowerCAmelCase_ = self.scheduler_classes[0]
        lowerCAmelCase_ = self.get_scheduler_config()
        lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ )

        scheduler.set_timesteps(25 )

        lowerCAmelCase_ = scheduler.timesteps

        lowerCAmelCase_ = self.dummy_model()
        lowerCAmelCase_ = self.dummy_sample_deter
        lowerCAmelCase_ = torch.manual_seed(0 )

        for i, t in enumerate(UpperCamelCase__ ):
            # 1. predict noise residual
            lowerCAmelCase_ = model(UpperCamelCase__, UpperCamelCase__ )

            if i + 1 == timesteps.shape[0]:
                lowerCAmelCase_ = None
            else:
                lowerCAmelCase_ = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            lowerCAmelCase_ = scheduler.step(
                UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, prev_timestep=UpperCamelCase__, generator=UpperCamelCase__ ).prev_sample

            lowerCAmelCase_ = pred_prev_sample

        lowerCAmelCase_ = torch.sum(torch.abs(UpperCamelCase__ ) )
        lowerCAmelCase_ = torch.mean(torch.abs(UpperCamelCase__ ) )

        assert abs(result_sum.item() - 258.2_044_983 ) < 1E-2
        assert abs(result_mean.item() - 0.3_362_038 ) < 1E-3

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Intentionally skipped common test (not applicable to UnCLIP)."""
        pass

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Intentionally skipped common test (not applicable to UnCLIP)."""
        pass
| 278 | 0 |
'''simple docstring'''
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def a_ ( lowerCamelCase : Optional[int] , lowerCamelCase : Dict , lowerCamelCase : Optional[Any] ):
    """Convert an original CompVis LDM checkpoint into a diffusers LDMPipeline.

    NOTE(review): the three parameters all share the name ``lowerCamelCase`` -
    a SyntaxError as written (presumably checkpoint_path, config_path and
    output_path). Assignment targets were obfuscated to ``lowerCAmelCase``
    while later lines reference the original names (``state_dict``,
    ``config``, ...), and ``_A`` is undefined. Comments describe intent only.
    """
    lowerCAmelCase = OmegaConf.load(_A )
    lowerCAmelCase = torch.load(_A , map_location='cpu' )['model']
    lowerCAmelCase = list(state_dict.keys() )

    # extract state_dict for VQVAE
    lowerCAmelCase = {}
    lowerCAmelCase = 'first_stage_model.'
    for key in keys:
        if key.startswith(_A ):
            lowerCAmelCase = state_dict[key]

    # extract state_dict for UNetLDM
    lowerCAmelCase = {}
    lowerCAmelCase = 'model.diffusion_model.'
    for key in keys:
        if key.startswith(_A ):
            lowerCAmelCase = state_dict[key]

    lowerCAmelCase = config.model.params.first_stage_config.params
    lowerCAmelCase = config.model.params.unet_config.params

    lowerCAmelCase = VQModel(**_A ).eval()
    vqvae.load_state_dict(_A )

    lowerCAmelCase = UNetLDMModel(**_A ).eval()
    unet.load_state_dict(_A )

    # Scheduler hyper-parameters come straight from the OmegaConf model config.
    lowerCAmelCase = DDIMScheduler(
        timesteps=config.model.params.timesteps , beta_schedule='scaled_linear' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=_A , )

    lowerCAmelCase = LDMPipeline(_A , _A , _A )
    pipeline.save_pretrained(_A )
if __name__ == "__main__":
    # Fix: the parser and parsed namespace were assigned to `__snake_case`
    # while the following lines referenced `parser` / `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument("""--checkpoint_path""", type=str, required=True)
    parser.add_argument("""--config_path""", type=str, required=True)
    parser.add_argument("""--output_path""", type=str, required=True)
    args = parser.parse_args()

    # NOTE(review): `convert_ldm_original` is the converter defined above (its
    # def was renamed by obfuscation) - confirm the name before running.
    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 4 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
_A = torch.device('''cpu''')
def __UpperCamelCase ( ):
    """Download the standard COCO cats image used to sanity-check vision models.

    Fixes: the original passed the undefined name ``_A`` both as the URL and
    as ``stream=``; restored the local ``url`` and ``stream=True``.
    """
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def __UpperCamelCase ( _A ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0, 8.8_6_8_5E-0_1, 2.4_3_6_0E-0_1] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_6_3_6E-0_1, 2.3_4_7_8E-0_1, -1.6_9_6_3E0_0, -1.7_3_8_1E0_0, -8.6_3_3_7E-0_1] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_7_6_8E-0_1, -4.7_4_2_9E-0_1, -1.0_8_9_7E0_0, -1.0_2_4_8E0_0, 3.5_5_2_3E-0_2] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_3_3_0E-0_1, 2.4_2_1_1E-0_1, -6.0_1_8_5E-0_1, -8.2_7_8_9E-0_1, -6.0_4_4_6E-0_2] )
def __UpperCamelCase ( _A , _A , _A ):
lowerCAmelCase_ = dct.pop(_A )
lowerCAmelCase_ = val
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = []
for k in state_dict.keys():
lowerCAmelCase_ = k
if ".pwconv" in k:
lowerCAmelCase_ = k_new.replace('''.pwconv''' , '''.point_wise_conv''' )
if ".dwconv" in k:
lowerCAmelCase_ = k_new.replace('''.dwconv''' , '''.depth_wise_conv''' )
if ".Proj." in k:
lowerCAmelCase_ = k_new.replace('''.Proj.''' , '''.proj.''' )
if "patch_embed" in k_new:
lowerCAmelCase_ = k_new.replace('''patch_embed''' , '''swiftformer.patch_embed.patch_embedding''' )
if "network" in k_new:
lowerCAmelCase_ = k_new.split('''.''' )
if ls[2].isdigit():
lowerCAmelCase_ = '''swiftformer.encoder.network.''' + ls[1] + '''.blocks.''' + ls[2] + '''.''' + '''.'''.join(ls[3:] )
else:
lowerCAmelCase_ = k_new.replace('''network''' , '''swiftformer.encoder.network''' )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def __UpperCamelCase ( _A , _A , _A ):
    """Convert an original SwiftFormer checkpoint to a HF model and verify it.

    NOTE(review): the three parameters all share the name ``_A`` - a
    SyntaxError as written (presumably swiftformer_name,
    pytorch_dump_folder_path, original_ckpt). Assignment targets were
    obfuscated to ``lowerCAmelCase_`` while later lines reference the original
    names. Comments describe the evident intent only.
    """
    lowerCAmelCase_ = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    lowerCAmelCase_ = 1000
    lowerCAmelCase_ = '''huggingface/label-files'''
    lowerCAmelCase_ = '''imagenet-1k-id2label.json'''
    lowerCAmelCase_ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) )
    lowerCAmelCase_ = {int(_A ): v for k, v in idalabel.items()}
    lowerCAmelCase_ = idalabel
    lowerCAmelCase_ = {v: k for k, v in idalabel.items()}

    # size of the architecture: per-stage depths and embedding dims.
    if swiftformer_name == "swiftformer_xs":
        lowerCAmelCase_ = [3, 3, 6, 4]
        lowerCAmelCase_ = [48, 56, 112, 220]

    elif swiftformer_name == "swiftformer_s":
        lowerCAmelCase_ = [3, 3, 9, 6]
        lowerCAmelCase_ = [48, 64, 168, 224]

    elif swiftformer_name == "swiftformer_l1":
        lowerCAmelCase_ = [4, 3, 10, 5]
        lowerCAmelCase_ = [48, 96, 192, 384]

    elif swiftformer_name == "swiftformer_l3":
        lowerCAmelCase_ = [4, 4, 12, 6]
        lowerCAmelCase_ = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('''https''' ):
            lowerCAmelCase_ = torch.hub.load_state_dict_from_url(_A , map_location='''cpu''' , check_hash=_A )
        else:
            lowerCAmelCase_ = torch.load(_A , map_location='''cpu''' )
    lowerCAmelCase_ = checkpoint

    lowerCAmelCase_ = create_rename_keys(_A )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(_A , _A , _A )

    # load HuggingFace model
    lowerCAmelCase_ = SwiftFormerForImageClassification(_A ).eval()
    hf_model.load_state_dict(_A )

    # prepare test inputs
    lowerCAmelCase_ = prepare_img()
    lowerCAmelCase_ = ViTImageProcessor.from_pretrained('''preprocessor_config''' )
    lowerCAmelCase_ = processor(images=_A , return_tensors='''pt''' )

    # compare outputs from both models against the hard-coded reference logits.
    lowerCAmelCase_ = get_expected_output(_A )
    lowerCAmelCase_ = hf_model(inputs['''pixel_values'''] ).logits

    assert hf_logits.shape == torch.Size([1, 1000] )
    assert torch.allclose(hf_logits[0, 0:5] , _A , atol=1E-3 )

    Path(_A ).mkdir(exist_ok=_A )
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}" )
    hf_model.save_pretrained(_A )
if __name__ == "__main__":
    # Fix: the parser and parsed namespace were assigned to `_A` while the
    # following lines referenced `parser` / `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--swiftformer_name''',
        default='''swiftformer_xs''',
        choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
        type=str,
        help='''Name of the SwiftFormer model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default='''./converted_outputs/''',
        type=str,
        help='''Path to the output PyTorch model directory.''',
    )
    parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')

    args = parser.parse_args()
    # NOTE(review): `convert_swiftformer_checkpoint` is the converter defined
    # above (its def was renamed by obfuscation) - confirm before running.
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 278 | 0 |
"""simple docstring"""
from __future__ import annotations
from scipy.special import comb # type: ignore
class _UpperCAmelCase :
def __init__( self : Tuple , A : Any ) -> List[str]:
lowercase_ : Optional[int] = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
lowercase_ : Dict = len(UpperCamelCase__ ) - 1
def A ( self : Union[str, Any] , A : Dict ) -> Union[str, Any]:
assert 0 <= t <= 1, "Time t must be between 0 and 1."
lowercase_ : Union[str, Any] = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , UpperCamelCase__ ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(UpperCamelCase__ ) , 5 ) == 1
return output_values
def A ( self : Union[str, Any] , A : Tuple ) -> Optional[int]:
assert 0 <= t <= 1, "Time t must be between 0 and 1."
lowercase_ : str = self.basis_function(UpperCamelCase__ )
lowercase_ : Optional[int] = 0.0
lowercase_ : List[str] = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def A ( self : Any , A : Optional[Any] = 0.01 ) -> Tuple:
from matplotlib import pyplot as plt # type: ignore
lowercase_ : Optional[Any] = [] # x coordinates of points to plot
lowercase_ : Dict = [] # y coordinates of points to plot
lowercase_ : Optional[int] = 0.0
while t <= 1:
lowercase_ : Optional[Any] = self.bezier_curve_function(UpperCamelCase__ )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
lowercase_ : Any = [i[0] for i in self.list_of_points]
lowercase_ : Any = [i[1] for i in self.list_of_points]
plt.plot(
UpperCamelCase__ , UpperCamelCase__ , color='''blue''' , label='''Curve of Degree ''' + str(self.degree ) , )
plt.scatter(UpperCamelCase__ , UpperCamelCase__ , color='''red''' , label='''Control Points''' )
plt.legend()
plt.show()
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # NOTE(review): `BezierCurve` is not defined in this file (the class above
    # was renamed by obfuscation) - confirm the name before running.
    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
| 33 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class A ( __UpperCAmelCase ):
    """ViT model configuration.

    Fixes: ``__init__`` declared fourteen parameters all named
    ``UpperCamelCase__`` (a SyntaxError) while its body referenced the real
    hyper-parameter names, and every ``self.x`` assignment had been obfuscated
    to a throwaway local. Restored the standard ViT parameter names matching
    the body's references and the listed defaults.
    NOTE(review): the base class ``__UpperCAmelCase`` is undefined in this
    file - presumably PretrainedConfig; confirm.
    """

    # Model-type tag used by the auto classes.
    __snake_case = 'vit'

    def __init__( self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1E-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, encoder_stride=16, **kwargs, ):
        """Store the transformer and patch-embedding hyper-parameters."""
        super().__init__(**kwargs )

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class A ( __UpperCAmelCase ):
    """ONNX export configuration for ViT."""

    # Minimum supported torch version for this export config.
    __snake_case = version.parse('1.11' )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Describe the model input: one NCHW pixel_values tensor with all four
        axes dynamic."""
        dynamic_axes = {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}
        return OrderedDict([('''pixel_values''', dynamic_axes)] )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):  # shadows the property above, as in the original
        """Absolute tolerance used when validating exported outputs."""
        return 1E-4
| 278 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
a =random.Random()
def SCREAMING_SNAKE_CASE__ ( shape , scale=1.0 , rng=None , name=None ):
    """Create a ``shape[0] x shape[1]`` nested list of uniform floats in [0, scale).

    rng: optional ``random.Random``; falls back to the module-level generator
    ``a`` when omitted. ``name`` is accepted but unused (kept for interface
    compatibility with the original helper).

    Fixes: the original declared four parameters all named ``lowerCamelCase__``
    (a SyntaxError), referenced the undefined ``global_rng`` (the module RNG is
    bound to ``a`` in this file), and its ``-> List[Any]`` annotation used an
    unimported name.
    """
    if rng is None:
        rng = a  # module-level global RNG

    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )

    return values
class A_ ( unittest.TestCase ):
    """Helper that builds TvltFeatureExtractor configs and synthetic inputs.

    NOTE(review): obfuscation damage - ``__init__`` and the input-builder
    declare several parameters all named ``SCREAMING_SNAKE_CASE__`` (a
    SyntaxError as written), assignment targets were renamed to
    ``__lowerCamelCase`` while bodies reference the original names
    (``parent``, ``batch_size``, ...), and both methods below share the name
    ``lowerCAmelCase`` (the second shadows the first). Comments describe the
    evident intent only.
    """

    def __init__( self : List[str] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : List[str]=7 ,SCREAMING_SNAKE_CASE__ : str=4_0_0 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=2_0_0_0 ,SCREAMING_SNAKE_CASE__ : Tuple=2_0_4_8 ,SCREAMING_SNAKE_CASE__ : str=1_2_8 ,SCREAMING_SNAKE_CASE__ : Dict=1 ,SCREAMING_SNAKE_CASE__ : Any=5_1_2 ,SCREAMING_SNAKE_CASE__ : List[Any]=3_0 ,SCREAMING_SNAKE_CASE__ : List[Any]=4_4_1_0_0 ,):
        """Store the synthetic-data and extractor hyper-parameters."""
        __lowerCamelCase : List[Any] = parent
        __lowerCamelCase : Optional[Any] = batch_size
        __lowerCamelCase : List[Any] = min_seq_length
        __lowerCamelCase : int = max_seq_length
        # Step between consecutive per-sample lengths so inputs grow in size.
        __lowerCamelCase : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        __lowerCamelCase : List[str] = spectrogram_length
        __lowerCamelCase : List[str] = feature_size
        __lowerCamelCase : Any = num_audio_channels
        __lowerCamelCase : Any = hop_length
        __lowerCamelCase : str = chunk_length
        __lowerCamelCase : List[Any] = sampling_rate

    def lowerCAmelCase ( self : int):
        """Return the kwargs dict used to construct a TvltFeatureExtractor."""
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : str=False ,SCREAMING_SNAKE_CASE__ : Optional[int]=False):
        """Build a batch of random speech inputs (equal or increasing lengths),
        optionally converted to numpy arrays."""
        def _flatten(SCREAMING_SNAKE_CASE__ : Optional[Any]):
            # Flatten one nesting level of a list of lists.
            return list(itertools.chain(*UpperCamelCase__))

        if equal_length:
            __lowerCamelCase : List[str] = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            __lowerCamelCase : Optional[int] = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff)
            ]
        if numpify:
            __lowerCamelCase : Optional[int] = [np.asarray(UpperCamelCase__) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class A_ ( __UpperCAmelCase , unittest.TestCase ):
    """Tests for ``TvltFeatureExtractor``.

    NOTE(review): obfuscation damage - results are bound to the throwaway name
    ``__lowerCamelCase`` while assertions reference the original names, helper
    parameters are declared as ``SCREAMING_SNAKE_CASE__`` but bodies reference
    ``UpperCamelCase__``, and the mixin base ``__UpperCAmelCase`` is undefined.
    Comments describe the evident intent only.
    """

    # Feature-extractor class exercised by the shared mixin.
    _UpperCAmelCase : List[str] = TvltFeatureExtractor

    def lowerCAmelCase ( self : Optional[Any]):
        """setUp: build the config/tester helper."""
        __lowerCamelCase : List[Any] = TvltFeatureExtractionTester(self)

    def lowerCAmelCase ( self : Dict):
        """The extractor must expose all of its configuration attributes."""
        __lowerCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(UpperCamelCase__ ,'spectrogram_length'))
        self.assertTrue(hasattr(UpperCamelCase__ ,'feature_size'))
        self.assertTrue(hasattr(UpperCamelCase__ ,'num_audio_channels'))
        self.assertTrue(hasattr(UpperCamelCase__ ,'hop_length'))
        self.assertTrue(hasattr(UpperCamelCase__ ,'chunk_length'))
        self.assertTrue(hasattr(UpperCamelCase__ ,'sampling_rate'))

    def lowerCAmelCase ( self : List[Any]):
        """save_pretrained / from_pretrained round-trip preserves the config
        (mel filter banks compared numerically)."""
        __lowerCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCamelCase : int = feat_extract_first.save_pretrained(UpperCamelCase__)[0]
            check_json_file_has_correct_format(UpperCamelCase__)
            __lowerCamelCase : Union[str, Any] = self.feature_extraction_class.from_pretrained(UpperCamelCase__)

        __lowerCamelCase : Optional[Any] = feat_extract_first.to_dict()
        __lowerCamelCase : Optional[Any] = feat_extract_second.to_dict()
        __lowerCamelCase : Tuple = dict_first.pop('mel_filters')
        __lowerCamelCase : List[str] = dict_second.pop('mel_filters')
        self.assertTrue(np.allclose(UpperCamelCase__ ,UpperCamelCase__))
        self.assertEqual(UpperCamelCase__ ,UpperCamelCase__)

    def lowerCAmelCase ( self : Tuple):
        """to_json_file / from_json_file round-trip preserves the config."""
        __lowerCamelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCamelCase : str = os.path.join(UpperCamelCase__ ,'feat_extract.json')
            feat_extract_first.to_json_file(UpperCamelCase__)
            __lowerCamelCase : Optional[Any] = self.feature_extraction_class.from_json_file(UpperCamelCase__)

        __lowerCamelCase : int = feat_extract_first.to_dict()
        __lowerCamelCase : Any = feat_extract_second.to_dict()
        __lowerCamelCase : str = dict_first.pop('mel_filters')
        __lowerCamelCase : int = dict_second.pop('mel_filters')
        self.assertTrue(np.allclose(UpperCamelCase__ ,UpperCamelCase__))
        self.assertEqual(UpperCamelCase__ ,UpperCamelCase__)

    def lowerCAmelCase ( self : Optional[Any]):
        """Feature extraction produces 4-D spectrogram batches for unbatched,
        batched, masked and 2-D numpy inputs."""
        __lowerCamelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        __lowerCamelCase : Dict = [floats_list((1, x))[0] for x in range(8_0_0 ,1_4_0_0 ,2_0_0)]
        __lowerCamelCase : Union[str, Any] = [np.asarray(UpperCamelCase__) for speech_input in speech_inputs]

        # Test not batched input
        __lowerCamelCase : int = feature_extractor(np_speech_inputs[0] ,return_tensors='np' ,sampling_rate=4_4_1_0_0).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        __lowerCamelCase : List[Any] = feature_extractor(UpperCamelCase__ ,return_tensors='np' ,sampling_rate=4_4_1_0_0).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        __lowerCamelCase : List[Any] = feature_extractor(
            UpperCamelCase__ ,return_tensors='np' ,sampling_rate=4_4_1_0_0 ,mask_audio=UpperCamelCase__).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        __lowerCamelCase : Optional[int] = [floats_list((1, x))[0] for x in (8_0_0, 8_0_0, 8_0_0)]
        __lowerCamelCase : int = np.asarray(UpperCamelCase__)
        __lowerCamelCase : Any = feature_extractor(UpperCamelCase__ ,return_tensors='np' ,sampling_rate=4_4_1_0_0).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Tuple):
        """Load the first num_samples librispeech validation clips as arrays."""
        __lowerCamelCase : Optional[int] = load_dataset('hf-internal-testing/librispeech_asr_dummy' ,'clean' ,split='validation')
        # automatic decoding with librispeech
        __lowerCamelCase : List[str] = ds.sort('id').select(range(UpperCamelCase__))[:num_samples]['audio']
        return [x["array"] for x in speech_samples]

    def lowerCAmelCase ( self : Union[str, Any]):
        """Integration: spectrogram of a real clip matches reference values."""
        __lowerCamelCase : Dict = self._load_datasamples(1)
        __lowerCamelCase : Dict = TvltFeatureExtractor()
        __lowerCamelCase : str = feature_extractor(UpperCamelCase__ ,return_tensors='pt').audio_values

        self.assertEquals(audio_values.shape ,(1, 1, 1_9_2, 1_2_8))

        __lowerCamelCase : List[Any] = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] ,UpperCamelCase__ ,atol=1E-4))
# ---- (non-code artifact: dataset-row separator "| 73 |") ----
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __UpperCamelCase ( dataset , expected_features ):
    """Shared check: *dataset* is a 4-row / 3-column Dataset whose columns carry
    the dtypes listed in *expected_features*.

    Fix: the original signature declared the duplicate parameter ``_A`` twice
    (a SyntaxError) while the body referenced ``dataset``/``expected_features``.
    """
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __UpperCamelCase ( keep_in_memory , jsonl_path , tmp_path ):
    """Reading a jsonl file honours keep_in_memory (in-memory vs memory-mapped).

    Fix: duplicate ``_A`` parameters (SyntaxError); the body referenced
    ``tmp_path``/``jsonl_path``/``keep_in_memory`` which are the real fixtures.
    """
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    # NOTE(review): `_check_json_dataset` is the upstream name of the checker
    # defined above (mangled to `__UpperCamelCase` here) — confirm it resolves.
    _check_json_dataset(dataset , expected_features )
@pytest.mark.parametrize(
    '''features''' , [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ] , )
def __UpperCamelCase ( features , jsonl_path , tmp_path ):
    """Reading a jsonl file with (or without) an explicit Features schema.

    Fix: duplicate ``_A`` parameters and ``Value(_A)`` where the dict value
    ``dtype`` was intended.
    """
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path , features=features , cache_dir=cache_dir ).read()
    _check_json_dataset(dataset , expected_features )
@pytest.mark.parametrize(
    '''features''' , [
        None,
        {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
    ] , )
def __UpperCamelCase ( features , jsonl_312_path , tmp_path ):
    """Column order from the file (col_3, col_1, col_2) is preserved on read.

    Fix: duplicate ``_A`` parameters (SyntaxError) and ``Value(_A)`` for
    ``Value(dtype)``.
    """
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path , features=features , cache_dir=cache_dir ).read()
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def __UpperCamelCase ( jsonl_312_path , tmp_path ):
    """Requesting a column order different from the file yields that order.

    Fix: duplicate ``_A`` parameters (SyntaxError) and ``Value(_A)`` for
    ``Value(dtype)``.
    """
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()} ) if features is not None else None
    )
    cache_dir = tmp_path / '''cache'''
    dataset = JsonDatasetReader(jsonl_312_path , features=features , cache_dir=cache_dir ).read()
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __UpperCamelCase ( split , jsonl_path , tmp_path ):
    """Reading with an explicit split records that split (default: "train").

    Fixes: duplicate ``_A`` parameters (SyntaxError), and the final assert's
    precedence bug — ``a == b if c else "train"`` asserted the truthy string
    ``"train"`` whenever ``split`` was falsy.
    """
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    dataset = JsonDatasetReader(jsonl_path , cache_dir=cache_dir , split=split ).read()
    _check_json_dataset(dataset , expected_features )
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize('''path_type''' , [str, list] )
def __UpperCamelCase ( path_type , jsonl_path , tmp_path ):
    """A single path and a list of paths are both accepted by the reader.

    Fix: duplicate ``_A`` parameters (SyntaxError); the branches assign the
    local ``path`` actually passed to JsonDatasetReader.
    """
    if issubclass(path_type , str ):
        path = jsonl_path
    elif issubclass(path_type , list ):
        path = [jsonl_path]
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    dataset = JsonDatasetReader(path , cache_dir=cache_dir ).read()
    _check_json_dataset(dataset , expected_features )
def __UpperCamelCase ( dataset_dict , expected_features , splits=("train",) ):
    """Shared check for DatasetDict results: each listed split is a 4x3 dataset
    with the expected dtypes.

    Fix: duplicate ``_A`` parameters (SyntaxError); the body referenced
    ``dataset_dict``/``expected_features``.
    """
    assert isinstance(dataset_dict , DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __UpperCamelCase ( keep_in_memory , jsonl_path , tmp_path ):
    """DatasetDict variant of the keep_in_memory read test.

    Fix: duplicate ``_A`` parameters (SyntaxError).
    """
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_json_datasetdict(dataset , expected_features )
@pytest.mark.parametrize(
    '''features''' , [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ] , )
def __UpperCamelCase ( features , jsonl_path , tmp_path ):
    """DatasetDict variant of the explicit-Features read test.

    Fix: duplicate ``_A`` parameters (SyntaxError) and ``Value(_A)`` for
    ``Value(dtype)``.
    """
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = JsonDatasetReader({'''train''': jsonl_path} , features=features , cache_dir=cache_dir ).read()
    _check_json_datasetdict(dataset , expected_features )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __UpperCamelCase ( split , jsonl_path , tmp_path ):
    """Reading a dict of {split: path} produces a DatasetDict keyed by split.

    Fix: duplicate ``_A`` parameters (SyntaxError); the branches build the
    ``path`` mapping actually handed to the reader.
    """
    if split:
        path = {split: jsonl_path}
    else:
        split = '''train'''
        path = {'''train''': jsonl_path, '''test''': jsonl_path}
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    dataset = JsonDatasetReader(path , cache_dir=cache_dir ).read()
    _check_json_datasetdict(dataset , expected_features , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
def __UpperCamelCase ( _A ):
    """Deserialize one whole JSON document from the open file-like object *_A*."""
    parsed = json.load(_A )
    return parsed
def __UpperCamelCase ( _A ):
    """Parse *_A* as JSON Lines: one JSON document per line, returned as a list.

    Fix: the original iterated the undefined name ``buffer`` and parsed the
    file object ``_A`` itself on every iteration instead of each line.
    """
    return [json.loads(line ) for line in _A]
class A :
    """Round-trip tests for JsonDatasetWriter against the in-memory JSON loaders.

    Fixes: every method declared the duplicate parameter ``UpperCamelCase__``
    several times (a SyntaxError) while bodies referenced the intended fixture
    names; ``pytest.raises(UpperCamelCase__)`` expected ``ValueError``.

    NOTE(review): all methods in this mangled file share the name
    ``SCREAMING_SNAKE_CASE__`` so later definitions shadow earlier ones;
    upstream these were distinct ``test_*`` methods.
    """

    @pytest.mark.parametrize('''lines, load_json_function''', [(True, load_json_lines), (False, load_json)] )
    def SCREAMING_SNAKE_CASE__ ( self, lines, load_json_function, dataset ):
        """Write the fixture dataset to a buffer and read it back as JSON/JSONL."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines ).write()
            buffer.seek(0 )
            exported_content = load_json_function(buffer )
        assert isinstance(exported_content, list )
        assert isinstance(exported_content[0], dict )
        assert len(exported_content ) == 10

    @pytest.mark.parametrize(
        '''orient, container, keys, len_at''', [
            ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
            ('''split''', dict, {'''columns''', '''data'''}, '''data'''),
            ('''index''', dict, set('''0123456789''' ), None),
            ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
            ('''values''', list, None, None),
            ('''table''', dict, {'''schema''', '''data'''}, '''data'''),
        ], )
    def SCREAMING_SNAKE_CASE__ ( self, orient, container, keys, len_at, dataset ):
        """Each pandas-style ``orient`` yields the expected container shape."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient ).write()
            buffer.seek(0 )
            exported_content = load_json(buffer )
        assert isinstance(exported_content, container )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, '''keys''' ) and not hasattr(exported_content[0], '''keys''' )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(exported_content ) == 10

    @pytest.mark.parametrize('''lines, load_json_function''', [(True, load_json_lines), (False, load_json)] )
    def SCREAMING_SNAKE_CASE__ ( self, lines, load_json_function, dataset ):
        """Same round trip as above, with multiprocessing (num_proc=2) enabled."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2 ).write()
            buffer.seek(0 )
            exported_content = load_json_function(buffer )
        assert isinstance(exported_content, list )
        assert isinstance(exported_content[0], dict )
        assert len(exported_content ) == 10

    @pytest.mark.parametrize(
        '''orient, container, keys, len_at''', [
            ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
            ('''split''', dict, {'''columns''', '''data'''}, '''data'''),
            ('''index''', dict, set('''0123456789''' ), None),
            ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
            ('''values''', list, None, None),
            ('''table''', dict, {'''schema''', '''data'''}, '''data'''),
        ], )
    def SCREAMING_SNAKE_CASE__ ( self, orient, container, keys, len_at, dataset ):
        """Orient round trip with multiprocessing (num_proc=2) enabled."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2 ).write()
            buffer.seek(0 )
            exported_content = load_json(buffer )
        assert isinstance(exported_content, container )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, '''keys''' ) and not hasattr(exported_content[0], '''keys''' )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(exported_content ) == 10

    def SCREAMING_SNAKE_CASE__ ( self, dataset ):
        """num_proc must be a positive integer; 0 is rejected at construction."""
        with pytest.raises(ValueError ):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0 )

    @pytest.mark.parametrize('''compression, extension''', [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
    def SCREAMING_SNAKE_CASE__ ( self, shared_datadir, tmp_path_factory, extension, compression, dataset ):
        """Compressed output matches the pre-compressed reference file bit-for-bit
        once both are transparently decompressed by fsspec."""
        path = tmp_path_factory.mktemp('''data''' ) / f"test.json.{extension}"
        original_json_file = str(shared_datadir / f"test_file.json.{extension}" )
        JsonDatasetWriter(dataset, path, compression=compression ).write()
        with fsspec.open(path, '''rb''', compression='''infer''' ) as f:
            exported_content = f.read()
        with fsspec.open(original_json_file, '''rb''', compression='''infer''' ) as f:
            original_content = f.read()
        assert exported_content == original_content
# ---- (non-code artifact: dataset-row separator "| 278 | 0 |") ----
def snake_case ( snake_case__ : int = 1_000_000) -> int:
    """Sum of Euler's totient phi(n) for 2 <= n <= snake_case__ (Project Euler 72 style).

    Sieves the odd primes below the limit, then applies
    phi(n) = n * prod(1 - 1/p) over the multiples of every prime.

    Fix: the original body referenced the undefined names ``_A``, ``limit``,
    ``primes`` and ``phi`` (all mangled to one repeated assignment target).
    """
    limit = snake_case__
    primes = set(range(3 , limit , 2))
    primes.add(2)
    for p in range(3 , limit , 2):
        if p not in primes:
            continue
        # p is prime: strike its odd composite multiples from the sieve.
        primes.difference_update(set(range(p * p , limit , p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p , limit + 1 , p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
    # Fix: the original printed `solution()`, a name that does not exist in this
    # file — the totient-sum implementation above is named `snake_case`.
    print(F'''{snake_case() = }''')
# ---- (non-code artifact: dataset-row separator "| 180 |") ----
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
_A = '''scheduler_config.json'''
class A ( __UpperCAmelCase ):
    # Looks like a transformed copy of a scheduler-type id enum (cf. diffusers'
    # `KarrasDiffusionSchedulers`): each member is a stable integer id.
    # NOTE(review): the member names were all mangled to the same `__snake_case`,
    # so as plain class attributes only the last assignment (14) survives; if the
    # base class is `Enum`, duplicate member names are a hard error — confirm
    # against the original file before relying on this class.
    __snake_case = 1
    __snake_case = 2
    __snake_case = 3
    __snake_case = 4
    __snake_case = 5
    __snake_case = 6
    __snake_case = 7
    __snake_case = 8
    __snake_case = 9
    __snake_case = 10
    __snake_case = 11
    __snake_case = 12
    __snake_case = 13
    __snake_case = 14
@dataclass
class A ( __UpperCAmelCase ):
    # presumably a scheduler output dataclass (cf. diffusers' `SchedulerOutput`);
    # the single field's name and annotation were mangled to a bare `42` — the
    # original field cannot be recovered from this view, TODO confirm upstream.
    __snake_case = 42
class A :
    """Scheduler base class (mirrors diffusers' ``SchedulerMixin``): load/save a
    scheduler from a ``scheduler_config.json`` and enumerate compatible classes.

    Fixes: methods declared the duplicate parameter ``UpperCamelCase__`` several
    times (a SyntaxError), and ``_get_compatibles`` called ``getattr`` on the
    undefined name ``UpperCamelCase__``.
    """

    # NOTE(review): three assignments to the same name — only the last (True)
    # survives; upstream these were config_name / _compatibles / has_compatibles.
    __snake_case = SCHEDULER_CONFIG_NAME
    __snake_case = []
    __snake_case = True

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls, pretrained_model_name_or_path = None, subfolder = None, return_unused_kwargs = False, **kwargs, ):
        """Instantiate a scheduler from a saved config directory/repo."""
        # load_config is asked for unused kwargs and the commit hash so the
        # three-tuple unpack below is valid (upstream passes True/True here —
        # the mangled source obscured these flags, TODO confirm).
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path, subfolder=subfolder, return_unused_kwargs=True, return_commit_hash=True, **kwargs, )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs )

    def SCREAMING_SNAKE_CASE__ ( self, save_directory, push_to_hub = False, **kwargs ):
        """Serialise the scheduler configuration into *save_directory*."""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # NOTE(review): `_get_compatibles` is not defined under that name in this
        # mangled file (the classmethod below was also renamed) — confirm.
        return self._get_compatibles()

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls ):
        """Resolve the names in ``cls._compatibles`` to classes exported by the
        top-level package this module belongs to."""
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles ) )
        diffusers_library = importlib.import_module(__name__.split('''.''' )[0] )
        compatible_classes = [
            getattr(diffusers_library, c ) for c in compatible_classes_str if hasattr(diffusers_library, c )
        ]
        return compatible_classes
# ---- (non-code artifact: dataset-row separator "| 278 | 0 |") ----
from math import ceil
def UpperCamelCase ( lowerCAmelCase__ = 1001 ):
    """Sum of the numbers on the diagonals of an n-by-n Ulam spiral
    (Project Euler 28). *lowerCAmelCase__* must be an odd side length.

    Ring i (i >= 1) contributes its four corners: 4*(2i+1)^2 - 6*(2i),
    starting from the centre value 1.

    Fix: the original collapsed ``total``/``odd``/``even`` into one repeated
    local, leaving the accumulator expression referencing undefined names.
    """
    total = 1
    for i in range(1 , int(ceil(lowerCAmelCase__ / 2.0 ) ) ):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        # No CLI argument: use the default 1001x1001 spiral.
        print(UpperCamelCase())
    else:
        try:
            # Fix: the original assigned the parsed argument to a mangled name
            # and then called the undefined `solution(n)`.
            side_length = int(sys.argv[1])
            print(UpperCamelCase(side_length))
        except ValueError:
            print("Invalid entry - please enter a number")
# ---- (non-code artifact: dataset-row separator "| 101 |") ----
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A ( unittest.TestCase ):
    """Fast smoke tests for the KarrasVe pipeline with a tiny random UNet.

    Fixes: bodies passed the undefined name ``UpperCamelCase__`` everywhere and
    collapsed every local into one repeated assignment target.
    """

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """A tiny, deterministically-seeded UNet2D for pipeline smoke tests."""
        torch.manual_seed(0 )
        model = UNetaDModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''), up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''), )
        return model

    def SCREAMING_SNAKE_CASE__ ( self ):
        """pipe(...) and pipe(..., return_dict=False) must agree, and the output
        must match a hard-coded reference slice."""
        # NOTE(review): upstream the property above is `dummy_uncond_unet`; in
        # this mangled file it is also named SCREAMING_SNAKE_CASE__ — confirm.
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=2, generator=generator, output_type='''numpy''' ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type='''numpy''', return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class A ( unittest.TestCase ):
    """Slow integration test for KarrasVe with the pretrained celebahq model.

    Fixes: undefined ``UpperCamelCase__`` references and one repeated local
    assignment target for every intermediate value.
    """

    def SCREAMING_SNAKE_CASE__ ( self ):
        model_id = '''google/ncsnpp-celebahq-256'''
        model = UNetaDModel.from_pretrained(model_id )
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model, scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=20, generator=generator, output_type='''numpy''' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5_811, 0.5_924, 0.5_809, 0.587, 0.5_886, 0.5_861, 0.5_802, 0.586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# ---- (non-code artifact: dataset-row separator "| 278 | 0 |") ----
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)

# Positional-embedding sizes per pretrained checkpoint (the vocabulary itself is
# implicit: raw Unicode codepoints).
# Fix: all constants below were mangled to one repeated name while the dict
# literal and the class body reference CLS/SEP/... and UNICODE_VOCAB_SIZE.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """nielsr/canine-s""": 2_0_4_8,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1_1_1_4_1_1_2

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xe000
SEP = 0xe001
BOS = 0xe002
MASK = 0xe003
RESERVED = 0xe004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: """[CLS]""",
    SEP: """[SEP]""",
    BOS: """[BOS]""",
    MASK: """[MASK]""",
    PAD: """[PAD]""",
    RESERVED: """[RESERVED]""",
}

# Maps special codepoint human-readable names to their codepoint values.
SCREAMING_SNAKE_CASE_ = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class UpperCamelCase__ ( __UpperCAmelCase ):
    '''Character-level tokenizer (mirrors CanineTokenizer): every Unicode
    codepoint is its own token, and a token's id is its codepoint.

    Fixes: ``__init__`` declared the duplicate parameter ``lowerCamelCase__``
    many times with the undefined default ``chr(UpperCamelCase__)`` (a
    SyntaxError plus a NameError), and the remaining methods repeated the same
    duplicate-parameter pattern.

    NOTE(review): every method except ``__init__`` shares the mangled name
    ``SCREAMING_SNAKE_CASE__``, so later definitions shadow earlier ones;
    upstream these were the standard tokenizer hook methods.
    '''

    # presumably upstream's `max_model_input_sizes`; the constant it references
    # is restored at module level — TODO confirm attribute name upstream.
    __snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self ,bos_token=chr(0xe000 ) ,eos_token=chr(0xe001 ) ,sep_token=chr(0xe001 ) ,cls_token=chr(0xe000 ) ,pad_token=chr(0 ) ,mask_token=chr(0xe003 ) ,add_prefix_space=False ,model_max_length=2048 ,**kwargs ,):
        # Defaults are the CANINE special codepoints: CLS, SEP, SEP, CLS, PAD, MASK.
        bos_token = AddedToken(bos_token ,lstrip=False ,rstrip=False ) if isinstance(bos_token ,str ) else bos_token
        eos_token = AddedToken(eos_token ,lstrip=False ,rstrip=False ) if isinstance(eos_token ,str ) else eos_token
        sep_token = AddedToken(sep_token ,lstrip=False ,rstrip=False ) if isinstance(sep_token ,str ) else sep_token
        cls_token = AddedToken(cls_token ,lstrip=False ,rstrip=False ) if isinstance(cls_token ,str ) else cls_token
        pad_token = AddedToken(pad_token ,lstrip=False ,rstrip=False ) if isinstance(pad_token ,str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token ,lstrip=True ,rstrip=False ) if isinstance(mask_token ,str ) else mask_token
        super().__init__(
            bos_token=bos_token ,eos_token=eos_token ,sep_token=sep_token ,cls_token=cls_token ,pad_token=pad_token ,mask_token=mask_token ,add_prefix_space=add_prefix_space ,model_max_length=model_max_length ,**kwargs ,)
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Size of the (implicit) vocabulary: the full Unicode codepoint space."""
        return self._unicode_vocab_size

    def SCREAMING_SNAKE_CASE__ ( self ,text ):
        """Tokenize by splitting into individual characters."""
        return list(text )

    def SCREAMING_SNAKE_CASE__ ( self ,token ):
        """A token's id is its Unicode codepoint."""
        try:
            return ord(token )
        except TypeError:
            raise ValueError(F"""invalid token: '{token}'""" )

    def SCREAMING_SNAKE_CASE__ ( self ,index ):
        """Map an id back to a character; special codepoints yield their name."""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index )
        except TypeError:
            raise ValueError(F"""invalid id: {index}""" )

    def SCREAMING_SNAKE_CASE__ ( self ,tokens ):
        """Characters concatenate directly back into the original string."""
        return "".join(tokens )

    def SCREAMING_SNAKE_CASE__ ( self ,token_ids_0 ,token_ids_1 = None ):
        """Build model input: [CLS] A [SEP] ( B [SEP] )."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def SCREAMING_SNAKE_CASE__ ( self ,token_ids_0 ,token_ids_1 = None ,already_has_special_tokens = False ):
        """1 for special tokens, 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 ,token_ids_1=token_ids_1 ,already_has_special_tokens=True )
        result = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1 )) + [1]
        return result

    def SCREAMING_SNAKE_CASE__ ( self ,token_ids_0 ,token_ids_1 = None ):
        """Token-type ids: 0 for the first segment (incl. CLS/SEP), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep ) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep ) * [1]
        return result

    def SCREAMING_SNAKE_CASE__ ( self ,save_directory ,filename_prefix = None ):
        # CANINE keeps no vocabulary files on disk.
        return ()
# ---- (non-code artifact: dataset-row separator "| 296 |") ----
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
# ---- (non-code artifact: dataset-row separator "| 278 | 0 |") ----
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class _UpperCAmelCase :
@staticmethod
def a ( *_lowercase : List[Any] , **_lowercase : Any ):
pass
def lowercase__ ( snake_case_ ):
    """Return the MD5 hex digest of an image's raw bytes (stable fingerprint
    for comparing pipeline outputs).

    Fixes: ``hashlib.mda`` was a typo for ``hashlib.md5``, and the body
    referenced the undefined name ``image`` instead of the parameter.
    """
    m = hashlib.md5(snake_case_.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
    """Pipeline tests for depth estimation.

    Fixes: methods declared the duplicate parameter ``_lowercase`` several times
    (a SyntaxError) while bodies referenced the undefined ``UpperCamelCase__``.
    """

    a__ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def a ( self , model , tokenizer , processor ):
        """Pipeline factory used by the common pipeline test harness."""
        depth_estimator = DepthEstimationPipeline(model=model , image_processor=processor )
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def a ( self , depth_estimator , examples ):
        """Single- and batched-image inputs all yield {predicted_depth, depth}."""
        outputs = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , outputs )
        import datasets

        dataset = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
        outputs = depth_estimator(
            [
                Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                # RGBA
                dataset[0]['''file'''],
                # LA
                dataset[1]['''file'''],
                # L
                dataset[2]['''file'''],
            ] )
        self.assertEqual(
            [
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
            ] , outputs , )

    @require_tf
    @unittest.skip('''Depth estimation is not implemented in TF''' )
    def a ( self ):
        pass

    @slow
    @require_torch
    def a ( self ):
        """Integration test against the pretrained Intel/dpt-large checkpoint."""
        model_id = '''Intel/dpt-large'''
        depth_estimator = pipeline('''depth-estimation''' , model=model_id )
        outputs = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
        # NOTE(review): upstream this helper is `hashimage`; in this mangled file
        # it is defined above as `lowercase__` — confirm the name resolves.
        depth_hash = hashimage(outputs['''depth'''] )
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 )
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 )

    @require_torch
    def a ( self ):
        self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
# ---- (non-code artifact: dataset-row separator "| 332 |") ----
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def __UpperCamelCase ( _A = 3 ):
    """Build an n-qubit quantum Fourier transform circuit, run it on the QASM
    simulator with 10000 shots, and return the measurement counts.

    Fixes: ``isinstance(_A, _A)`` (a runtime TypeError for any input) should be
    the upstream string check; the body mixed the mangled parameter ``_A`` with
    the undefined original name ``number_of_qubits``; the inner rotation loop
    iterated the wrong bound.
    """
    number_of_qubits = _A
    if isinstance(number_of_qubits , str ):
        raise TypeError('''number of qubits must be a integer.''' )
    if number_of_qubits <= 0:
        raise ValueError('''number of qubits must be > 0.''' )
    if math.floor(number_of_qubits ) != number_of_qubits:
        raise ValueError('''number of qubits must be exact integer.''' )
    if number_of_qubits > 10:
        raise ValueError('''number of qubits too large to simulate(>10).''' )
    qr = QuantumRegister(number_of_qubits , '''qr''' )
    cr = ClassicalRegister(number_of_qubits , '''cr''' )
    quantum_circuit = QuantumCircuit(qr , cr )
    counter = number_of_qubits
    for i in range(number_of_qubits ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        # controlled phase rotations against all less-significant qubits
        for j in range(counter ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter )
    # reverse qubit order with swaps to complete the QFT
    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k , number_of_qubits - k - 1 )
    # measure all the qubits
    quantum_circuit.measure(qr , cr )
    # simulate with 10000 shots
    backend = Aer.get_backend('''qasm_simulator''' )
    job = execute(quantum_circuit , backend , shots=10000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
    # Fix: the original called the undefined `quantum_fourier_transform`; the
    # implementation above is named `__UpperCamelCase` in this file.
    print(
        f"Total count for quantum fourier transform state is: \
    {__UpperCamelCase(3)}"
    )
# ---- (non-code artifact: dataset-row separator "| 278 | 0 |") ----
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase__ : Optional[Any] = logging.get_logger(__name__)
class UpperCamelCase__ ( __UpperCAmelCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""pixel_values"""]
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] = True , SCREAMING_SNAKE_CASE_ : Optional[Any] = None , SCREAMING_SNAKE_CASE_ : Optional[Any] = PIL.Image.BICUBIC , SCREAMING_SNAKE_CASE_ : Union[str, Any] = True , SCREAMING_SNAKE_CASE_ : List[str] = None , SCREAMING_SNAKE_CASE_ : int = 1 / 2_5_5 , SCREAMING_SNAKE_CASE_ : int = True , SCREAMING_SNAKE_CASE_ : str = True , SCREAMING_SNAKE_CASE_ : List[str] = None , SCREAMING_SNAKE_CASE_ : Union[str, Any] = None , **SCREAMING_SNAKE_CASE_ : List[str] , ):
super().__init__(**UpperCamelCase__ )
lowerCAmelCase_ : int = size if size is not None else {'height': 2_5_6, 'width': 2_5_6}
lowerCAmelCase_ : Tuple = get_size_dict(UpperCamelCase__ )
lowerCAmelCase_ : Union[str, Any] = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
lowerCAmelCase_ : Union[str, Any] = get_size_dict(UpperCamelCase__ , param_name='crop_size' )
lowerCAmelCase_ : Optional[Any] = do_resize
lowerCAmelCase_ : Optional[int] = size
lowerCAmelCase_ : List[Any] = resample
lowerCAmelCase_ : Any = do_center_crop
lowerCAmelCase_ : Optional[int] = crop_size
lowerCAmelCase_ : int = do_rescale
lowerCAmelCase_ : Union[str, Any] = rescale_factor
lowerCAmelCase_ : Union[str, Any] = do_normalize
lowerCAmelCase_ : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase_ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] = PIL.Image.BICUBIC , SCREAMING_SNAKE_CASE_ : Optional[Any] = None , **SCREAMING_SNAKE_CASE_ : int , ):
lowerCAmelCase_ : Dict = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}" )
return resize(
UpperCamelCase__ , size=(size['height'], size['width']) , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] = None , **SCREAMING_SNAKE_CASE_ : Union[str, Any] , ):
lowerCAmelCase_ : List[Any] = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}" )
return center_crop(UpperCamelCase__ , size=(size['height'], size['width']) , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] = None , **SCREAMING_SNAKE_CASE_ : List[str] , ):
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] = None , **SCREAMING_SNAKE_CASE_ : Union[str, Any] , ):
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple = None , SCREAMING_SNAKE_CASE_ : Optional[Any] = None , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : str = None , SCREAMING_SNAKE_CASE_ : str = None , SCREAMING_SNAKE_CASE_ : int = None , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : List[str] = None , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : int = None , SCREAMING_SNAKE_CASE_ : Any = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ : Any , ):
lowerCAmelCase_ : Any = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ : int = resample if resample is not None else self.resample
lowerCAmelCase_ : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase_ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase_ : List[str] = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase_ : Tuple = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase_ : Dict = image_std if image_std is not None else self.image_std
lowerCAmelCase_ : Optional[Any] = size if size is not None else self.size
lowerCAmelCase_ : Optional[int] = get_size_dict(UpperCamelCase__ )
lowerCAmelCase_ : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase_ : Optional[int] = get_size_dict(UpperCamelCase__ , param_name='crop_size' )
lowerCAmelCase_ : Dict = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCAmelCase_ : Dict = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
lowerCAmelCase_ : int = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_center_crop:
lowerCAmelCase_ : Optional[Any] = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images]
if do_rescale:
lowerCAmelCase_ : Optional[Any] = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
lowerCAmelCase_ : int = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
lowerCAmelCase_ : Tuple = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
lowerCAmelCase_ : List[Any] = {'pixel_values': images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
| 224 |
from functools import lru_cache


@lru_cache
def __UpperCamelCase ( _A ):
    """Return _A! (the factorial of a non-negative integer).

    Raises:
        ValueError: if _A is negative.

    Fixes vs. original: the body referenced an undefined name `num`
    (NameError on every call), and the recursive form exhausted the
    interpreter recursion limit for inputs beyond ~1000 -- replaced with
    an iterative product.  `lru_cache` keeps repeated calls O(1).
    """
    if _A < 0:
        raise ValueError('''Number should not be negative.''' )
    result = 1
    for factor in range(2 , _A + 1 ):
        result *= factor
    return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 278 | 0 |
'''simple docstring'''
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> str:
    """Compute WER/CER over an ASR eval result set and write them to text files.

    NOTE(review): machine-mangled block -- the signature repeats one parameter
    name (a SyntaxError) and the body reads `args`, `result`, `wer`, `cer`,
    `dataset_id`, ... which are never bound here.  Intended parameters appear
    to be `(result, args)` -- confirm against the original upstream script.
    Left byte-identical; comments only.
    """
    A: Tuple = args.log_outputs
    # dataset identifier used to name the output files, e.g. "org_name_config_split"
    A: Union[str, Any] = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
    # load metric
    A: int = load_metric('''wer''' )
    A: Union[str, Any] = load_metric('''cer''' )
    # compute metrics
    A: List[str] = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
    A: int = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
    # print & log results
    A: Tuple = F"""WER: {wer_result}\nCER: {cer_result}"""
    print(_A )
    with open(F"""{dataset_id}_eval_results.txt""" , '''w''' ) as f:
        f.write(_A )
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        A: Optional[int] = F"""log_{dataset_id}_predictions.txt"""
        A: Any = F"""log_{dataset_id}_targets.txt"""
        with open(_A , '''w''' ) as p, open(_A , '''w''' ) as t:
            # mapping function to write output
            def write_to_file(__lowercase , __lowercase ):
                p.write(F"""{i}""" + '''\n''' )
                p.write(batch['''prediction'''] + '''\n''' )
                t.write(F"""{i}""" + '''\n''' )
                t.write(batch['''target'''] + '''\n''' )
            result.map(_A , with_indices=_A )
def SCREAMING_SNAKE_CASE( __lowercase ) -> str:
    """Lower-case *__lowercase* and strip ignored punctuation / whitespace runs.

    Mirrors the normalization applied to the training transcripts so WER/CER
    comparisons are fair.  Fixes vs. original: the body referenced undefined
    names (`_A`, `text` before assignment) and a single collapsed local was
    reassigned for every intermediate value; the return annotation said `int`
    for a string result.
    """
    chars_to_ignore_regex = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex , '''''' , __lowercase.lower() )
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    # NOTE(review): the last two entries are a 3-space and a 2-space run in the
    # upstream script; the mangled source rendered them ambiguously -- confirm.
    token_sequences_to_ignore = ['''\n\n''', '''\n''', '''   ''', '''  ''']
    for t in token_sequences_to_ignore:
        text = ''' '''.join(text.split(t ) )
    return text
def SCREAMING_SNAKE_CASE( __lowercase ) -> Dict:
    """Run ASR inference over a dataset split and log WER/CER results.

    NOTE(review): machine-mangled block -- the parameter should be `args`
    but the body reads the unbound names `args`, `asr`, `prediction`, and
    every local was collapsed into `A`; `log_results`/`normalize_text` are
    also shadowed by same-named siblings.  Left byte-identical; comments only.
    """
    # load dataset
    A: str = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=_A )
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    A: Tuple = AutoFeatureExtractor.from_pretrained(args.model_id )
    A: List[Any] = feature_extractor.sampling_rate
    # resample audio
    A: Tuple = dataset.cast_column('''audio''' , Audio(sampling_rate=_A ) )
    # load eval pipeline
    if args.device is None:
        A: Any = 0 if torch.cuda.is_available() else -1
    A: int = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
    # map function to decode audio
    def map_to_pred(__lowercase ):
        A: int = asr(
            batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
        A: Tuple = prediction['''text''']
        A: str = normalize_text(batch['''sentence'''] )
        return batch
    # run inference on all examples
    A: Optional[int] = dataset.map(_A , remove_columns=dataset.column_names )
    # compute and log_results
    # do not change function below
    log_results(_A , _A )
if __name__ == "__main__":
    # Script entry point: parse CLI flags for the ASR evaluation run.
    # NOTE(review): mangled module -- the parser is bound to `UpperCamelCase`
    # but used as `parser`, and `main` is never defined under that name, so
    # running this script raises NameError.  Left byte-identical.
    UpperCamelCase = argparse.ArgumentParser()
    parser.add_argument(
        '''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
    )
    parser.add_argument(
        '''--dataset''',
        type=str,
        required=True,
        help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
    )
    parser.add_argument(
        '''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
    )
    parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
    parser.add_argument(
        '''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
    )
    parser.add_argument(
        '''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
    )
    parser.add_argument(
        '''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
    )
    parser.add_argument(
        '''--device''',
        type=int,
        default=None,
        help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
    )
    UpperCamelCase = parser.parse_args()
    main(args)
| 319 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def __UpperCamelCase ( _A ):
    """Build an UperNetConfig for the Swin variant encoded in model name *_A*.

    Picks the Swin backbone hyper-parameters (embed dim, depths, heads,
    window size) and the UperNet auxiliary-head width from the substring
    tiny/small/base/large, then attaches ADE20k label maps fetched from the
    Hugging Face Hub.

    Fixes vs. original: the body read the undefined name `model_name` and
    every local was collapsed into one rebound variable, so no value survived
    to the config constructors; locals reconstructed from the literals.
    """
    model_name = _A
    # defaults for the tiny/small variants; overridden below for base/large
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768
    # set label information
    num_labels = 150
    repo_id = '''huggingface/label-files'''
    filename = '''ade20k-id2label.json'''
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    backbone_config = SwinConfig(
        embed_dim=embed_dim , depths=depths , num_heads=num_heads , window_size=window_size , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
    # NOTE(review): the mangled source spelled these kwargs `idalabel`/`labelaid`;
    # the real UperNetConfig arguments are `id2label`/`label2id`.
    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , id2label=idalabel , label2id=labelaid , )
    return config
def __UpperCamelCase ( _A ):
    """Return the list of (old_key, new_key) pairs mapping mmseg Swin+UperNet
    checkpoint names to Hugging Face UperNet parameter names.

    *_A* is the UperNet config; only ``backbone_config.depths`` is read, to
    enumerate the encoder stages and blocks.

    Fixes vs. original: the body read the undefined names `config` and
    `rename_keys` (the local list was never bound under that name), so the
    function raised NameError; the key pairs themselves are unchanged.
    """
    config = _A
    rename_keys = []
    # fmt: off
    # stem
    rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') )
    rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') )
    rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') )
    rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') )
    # stages
    for i in range(len(config.backbone_config.depths ) ):
        for j in range(config.backbone_config.depths[i] ):
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
        # the last stage has no patch-merging downsample layer
        if i < 3:
            rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight") )
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight") )
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias") )
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight") )
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias") )
    # decode head
    rename_keys.extend(
        [
            ('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
            ('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
            ('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
            ('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
        ] )
    # fmt: on
    return rename_keys
def __UpperCamelCase ( _A , _B , _C ):
    """Move the value stored under key *_B* of dict *_A* to key *_C* (in place).

    Fixes vs. original: all three parameters shared one mangled name (a
    SyntaxError) and the popped value was never written back; parameters are
    (state_dict, old_key, new_key), matching the loop that calls this helper.
    """
    val = _A.pop(_B )
    _A[_C] = val
def __UpperCamelCase ( _A , _A ):
    """Split each Swin block's fused qkv projection into separate q/k/v entries.

    NOTE(review): machine-mangled block -- the signature repeats one parameter
    name (a SyntaxError), the body reads unbound `state_dict`/`dim`/
    `in_proj_weight`/`in_proj_bias`, and the destination state-dict keys on
    the left-hand sides were lost in the mangling, so they cannot be safely
    reconstructed here.  Left byte-identical; comments only.
    """
    # per-stage feature widths: embed_dim * 2**stage
    lowerCAmelCase_ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        lowerCAmelCase_ = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            lowerCAmelCase_ = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight" )
            lowerCAmelCase_ = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias" )
            # next, add query, keys and values (in that order) to the state dict
            lowerCAmelCase_ = in_proj_weight[:dim, :]
            lowerCAmelCase_ = in_proj_bias[: dim]
            lowerCAmelCase_ = in_proj_weight[
                dim : dim * 2, :
            ]
            lowerCAmelCase_ = in_proj_bias[
                dim : dim * 2
            ]
            lowerCAmelCase_ = in_proj_weight[
                -dim :, :
            ]
            lowerCAmelCase_ = in_proj_bias[-dim :]
            # fmt: on
def __UpperCamelCase ( _A ):
    """Permute a 2-D (out_channel, in_channel) weight by regrouping its input
    columns from 4 interleaved groups back to contiguous order (Swin
    patch-merging "reduction" layout conversion).

    Fixes vs. original: the body read the undefined name `x` instead of the
    parameter and its shape/local bindings were collapsed into one name.
    """
    out_channel, in_channel = _A.shape
    x = _A.reshape(out_channel , 4 , in_channel // 4 )
    x = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def __UpperCamelCase ( _A ):
    """Permute a 2-D (out_channel, in_channel) weight by regrouping each run
    of 4 input columns into interleaved order (inverse of the companion
    reduction-permute helper above in the original script).

    Fixes vs. original: the body read the undefined name `x` instead of the
    parameter and its shape/local bindings were collapsed into one name.
    """
    out_channel, in_channel = _A.shape
    x = _A.reshape(out_channel , in_channel // 4 , 4 )
    x = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def __UpperCamelCase ( _A ):
    """Permute a 1-D norm parameter of length in_channel by regrouping its
    4 interleaved quarters back to contiguous order (1-D counterpart of the
    2-D reduction permute above).

    Fixes vs. original: the body read undefined names (`x`, `in_channel`)
    instead of the parameter and its collapsed locals.
    """
    in_channel = _A.shape[0]
    x = _A.reshape(4 , in_channel // 4 )
    x = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(in_channel )
    return x
def __UpperCamelCase ( _A ):
    """Permute a 1-D norm parameter of length in_channel by regrouping each
    run of 4 entries into interleaved order (inverse of the companion 1-D
    permute helper above in the original script).

    Fixes vs. original: the body read undefined names (`x`, `in_channel`)
    instead of the parameter and its collapsed locals.
    """
    in_channel = _A.shape[0]
    x = _A.reshape(in_channel // 4 , 4 )
    x = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(in_channel )
    return x
def __UpperCamelCase ( _A , _A , _A ):
    """Download an mmseg Swin+UperNet checkpoint, convert it to the Hugging
    Face layout, verify logits on a fixture image, and optionally save/push.

    NOTE(review): machine-mangled block -- the signature repeats one parameter
    name (a SyntaxError; intended `(model_name, pytorch_dump_folder_path,
    push_to_hub)`), and the body reads many unbound names (`model_name`,
    `state_dict`, `model`, `val`, `rename_keys`, `value`, `processor`, ...)
    because every local was collapsed.  Left byte-identical; comments only.
    """
    lowerCAmelCase_ = {
        '''upernet-swin-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth''',
        '''upernet-swin-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth''',
        '''upernet-swin-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth''',
        '''upernet-swin-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth''',
    }
    lowerCAmelCase_ = model_name_to_url[model_name]
    lowerCAmelCase_ = torch.hub.load_state_dict_from_url(_A , map_location='''cpu''' , file_name=_A )[
        '''state_dict'''
    ]
    for name, param in state_dict.items():
        print(_A , param.shape )
    lowerCAmelCase_ = get_upernet_config(_A )
    lowerCAmelCase_ = UperNetForSemanticSegmentation(_A )
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        lowerCAmelCase_ = state_dict.pop(_A )
        if "bn" in key:
            lowerCAmelCase_ = key.replace('''bn''' , '''batch_norm''' )
        lowerCAmelCase_ = val
    # rename keys
    lowerCAmelCase_ = create_rename_keys(_A )
    for src, dest in rename_keys:
        rename_key(_A , _A , _A )
    read_in_q_k_v(_A , config.backbone_config )
    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                lowerCAmelCase_ = reverse_correct_unfold_reduction_order(_A )
            if "norm" in key:
                lowerCAmelCase_ = reverse_correct_unfold_norm_order(_A )
    model.load_state_dict(_A )
    # verify on image
    lowerCAmelCase_ = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
    lowerCAmelCase_ = Image.open(requests.get(_A , stream=_A ).raw ).convert('''RGB''' )
    lowerCAmelCase_ = SegformerImageProcessor()
    lowerCAmelCase_ = processor(_A , return_tensors='''pt''' ).pixel_values
    with torch.no_grad():
        lowerCAmelCase_ = model(_A )
    lowerCAmelCase_ = outputs.logits
    print(logits.shape )
    print('''First values of logits:''' , logits[0, 0, :3, :3] )
    # assert values
    # expected top-left 3x3 logits per released checkpoint (regression check)
    if model_name == "upernet-swin-tiny":
        lowerCAmelCase_ = torch.tensor(
            [[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] )
    elif model_name == "upernet-swin-small":
        lowerCAmelCase_ = torch.tensor(
            [[-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.0_9_0_8, -7.0_9_0_8, -6.8_5_3_4]] )
    elif model_name == "upernet-swin-base":
        lowerCAmelCase_ = torch.tensor(
            [[-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.4_7_6_3, -6.4_7_6_3, -6.3_2_5_4]] )
    elif model_name == "upernet-swin-large":
        lowerCAmelCase_ = torch.tensor(
            [[-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.4_0_4_4, -7.4_0_4_4, -7.2_5_8_6]] )
    print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , _A , atol=1E-4 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(_A )
        print(f"Saving processor to {pytorch_dump_folder_path}" )
        processor.save_pretrained(_A )
    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub" )
        model.push_to_hub(f"openmmlab/{model_name}" )
        processor.push_to_hub(f"openmmlab/{model_name}" )
if __name__ == "__main__":
    # Script entry point: parse CLI flags for the checkpoint conversion.
    # NOTE(review): mangled module -- the parser/args are bound to `_A` but
    # used as `parser`/`args`, and `convert_upernet_checkpoint` is never
    # defined under that name; running this script raises NameError.
    _A = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''upernet-swin-tiny''',
        type=str,
        choices=[f"upernet-swin-{size}" for size in ['''tiny''', '''small''', '''base''', '''large''']],
        help='''Name of the Swin + UperNet model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
    )
    _A = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 278 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( first_str , second_str ) -> str:
    """Interleave the characters of *first_str* and *second_str*.

    Characters are taken alternately from each string; once the shorter
    string is exhausted, the remainder of the longer one is appended.
    E.g. ("AB", "XYZ") -> "AXBYZ".

    Fixes vs. original: both parameters shared one mangled name (a
    SyntaxError), the body read the undefined `_A`, and all locals were
    collapsed into a single rebound variable; the return annotation said
    `Tuple` for a string result.
    """
    first_str_length = len(first_str )
    second_str_length = len(second_str )
    # iterate up to the length of the longer input
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length ):
        if char_count < first_str_length:
            output_list.append(first_str[char_count] )
        if char_count < second_str_length:
            output_list.append(second_str[char_count] )
    return "".join(output_list )


if __name__ == "__main__":
    # fixed: the original called an undefined name `alternative_string_arrange`
    print(SCREAMING_SNAKE_CASE__("AB", "XYZ"), end=" ")
| 177 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def __UpperCamelCase ( _A , _A ):
    """Compute WER/CER over an ASR eval result set and write them to text files.

    NOTE(review): machine-mangled block -- the signature repeats one parameter
    name (a SyntaxError) and the body reads `args`, `result`, `wer`, `cer`,
    `dataset_id`, ... which are never bound here.  Intended parameters appear
    to be `(result, args)` -- confirm against the original upstream script.
    Left byte-identical; comments only.
    """
    lowerCAmelCase_ = args.log_outputs
    # dataset identifier used to name the output files, e.g. "org_name_config_split"
    lowerCAmelCase_ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
    # load metric
    lowerCAmelCase_ = load_metric('''wer''' )
    lowerCAmelCase_ = load_metric('''cer''' )
    # compute metrics
    lowerCAmelCase_ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
    lowerCAmelCase_ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
    # print & log results
    lowerCAmelCase_ = f"WER: {wer_result}\nCER: {cer_result}"
    print(_A )
    with open(f"{dataset_id}_eval_results.txt" , '''w''' ) as f:
        f.write(_A )
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        lowerCAmelCase_ = f"log_{dataset_id}_predictions.txt"
        lowerCAmelCase_ = f"log_{dataset_id}_targets.txt"
        with open(_A , '''w''' ) as p, open(_A , '''w''' ) as t:
            # mapping function to write output
            def write_to_file(_A , _A ):
                p.write(f"{i}" + '''\n''' )
                p.write(batch['''prediction'''] + '''\n''' )
                t.write(f"{i}" + '''\n''' )
                t.write(batch['''target'''] + '''\n''' )
            result.map(_A , with_indices=_A )
def __UpperCamelCase ( _A ):
    """Lower-case the transcript *_A* and strip ignored punctuation and
    whitespace runs, mirroring the normalization used during training.

    Fixes vs. original: the body referenced the undefined name `text`
    instead of the parameter, and one collapsed local was reassigned for
    every intermediate value (the regex was overwritten before use).
    """
    chars_to_ignore_regex = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex , '''''' , _A.lower() )
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    # NOTE(review): the last two entries are a 3-space and a 2-space run in the
    # upstream script; the mangled source rendered them ambiguously -- confirm.
    token_sequences_to_ignore = ['''\n\n''', '''\n''', '''   ''', '''  ''']
    for t in token_sequences_to_ignore:
        text = ''' '''.join(text.split(t ) )
    return text
def __UpperCamelCase ( _A ):
    """Run ASR inference over a dataset split and log WER/CER results.

    NOTE(review): machine-mangled block -- the parameter should be `args`
    but the body reads the unbound names `args`, `asr`, `prediction`, and
    every local was collapsed into `lowerCAmelCase_`; `log_results` /
    `normalize_text` are also shadowed by same-named siblings.  Left
    byte-identical; comments only.
    """
    # load dataset
    lowerCAmelCase_ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=_A )
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained(args.model_id )
    lowerCAmelCase_ = feature_extractor.sampling_rate
    # resample audio
    lowerCAmelCase_ = dataset.cast_column('''audio''' , Audio(sampling_rate=_A ) )
    # load eval pipeline
    if args.device is None:
        lowerCAmelCase_ = 0 if torch.cuda.is_available() else -1
    lowerCAmelCase_ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
    # map function to decode audio
    def map_to_pred(_A ):
        lowerCAmelCase_ = asr(
            batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
        lowerCAmelCase_ = prediction['''text''']
        lowerCAmelCase_ = normalize_text(batch['''sentence'''] )
        return batch
    # run inference on all examples
    lowerCAmelCase_ = dataset.map(_A , remove_columns=dataset.column_names )
    # compute and log_results
    # do not change function below
    log_results(_A , _A )
if __name__ == "__main__":
    # Script entry point: parse CLI flags for the ASR evaluation run.
    # NOTE(review): mangled module -- the parser/args are bound to `_A` but
    # used as `parser`/`args`, and `main` is never defined under that name;
    # running this script raises NameError.  Left byte-identical.
    _A = argparse.ArgumentParser()
    parser.add_argument(
        '''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
    )
    parser.add_argument(
        '''--dataset''',
        type=str,
        required=True,
        help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
    )
    parser.add_argument(
        '''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
    )
    parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
    parser.add_argument(
        '''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
    )
    parser.add_argument(
        '''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
    )
    parser.add_argument(
        '''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
    )
    parser.add_argument(
        '''--device''',
        type=int,
        default=None,
        help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
    )
    _A = parser.parse_args()
    main(args)
| 278 | 0 |
import math
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> int:
    """Return the *__UpperCamelCase*-th Proth number (3, 5, 9, 13, 17, 25, ...).

    Proth numbers have the form k * 2**n + 1 with odd k < 2**n.  The list is
    generated block by block: in block `b`, 2**(b+1) is added to previously
    generated entries, with the block width doubling each time.

    Raises:
        TypeError: if the argument is not an integer.
        ValueError: if the argument is < 1.

    Fixes vs. original: the body read undefined names (`_A`, `number`) and
    all locals were collapsed into one rebound variable, so the generation
    loop could never run; locals reconstructed from the visible literals.
    """
    number = __UpperCamelCase
    if not isinstance(number , int ):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if number < 1:
        msg = f"""Input value of [number={number}] must be > 0"""
        raise ValueError(msg )
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # number of doubling blocks needed to reach index `number`
        block_index = int(math.log(number // 3 , 2 ) ) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1 , block_index ):
            for _ in range(increment ):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # fixed: the original called an undefined name `proth`
    for number in range(11):
        value = 0
        try:
            value = __SCREAMING_SNAKE_CASE(number)
        except ValueError:
            print(F"""ValueError: there is no {number}th Proth number""")
            continue
        print(F"""The {number}th Proth number: {value}""")
| 219 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_A = logging.get_logger(__name__)
# NOTE(review): mangled module -- every module-level constant below is bound
# to the same name `_A`, so each assignment clobbers the previous one
# (max-length table, vocab size, the special codepoints, both name maps).
# Left byte-identical; comments only.
_A = {
    '''nielsr/canine-s''': 2_048,
}
# Unicode defines 1,114,112 total “codepoints”
_A = 1_114_112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
_A = 0
_A = 0xe0_00
_A = 0xe0_01
_A = 0xe0_02
_A = 0xe0_03
_A = 0xe0_04
# Maps special codepoints to human-readable names.
_A = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
_A = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class A ( __UpperCAmelCase ):
    """Character-level (CANINE-style) tokenizer: every Unicode codepoint is its
    own token, with ids equal to `ord(char)` plus a handful of private-use
    special codepoints.

    NOTE(review): machine-mangled class -- the `__init__` defaults call
    `chr(UpperCamelCase__)` on an undefined name, so instantiating this class
    raises NameError, and all positional parameters share one mangled name in
    several methods' keyword forwarding.  Code left byte-identical; only
    comments/docstrings changed.
    """
    __snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self, UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=False, UpperCamelCase__=2048, **UpperCamelCase__, ):
        """Wrap special-token strings as AddedToken objects, register them with
        the base tokenizer, and build codepoint<->name maps for specials."""
        lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else bos_token
        lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else eos_token
        lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else sep_token
        lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else cls_token
        lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else mask_token
        super().__init__(
            bos_token=UpperCamelCase__, eos_token=UpperCamelCase__, sep_token=UpperCamelCase__, cls_token=UpperCamelCase__, pad_token=UpperCamelCase__, mask_token=UpperCamelCase__, add_prefix_space=UpperCamelCase__, model_max_length=UpperCamelCase__, **UpperCamelCase__, )
        # Creates a mapping for looking up the IDs of special symbols.
        lowerCAmelCase_ = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            lowerCAmelCase_ = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        lowerCAmelCase_ = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        lowerCAmelCase_ = UNICODE_VOCAB_SIZE
        lowerCAmelCase_ = len(self._special_codepoints )
    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Vocabulary size: the full Unicode codepoint space."""
        return self._unicode_vocab_size
    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
        """Tokenize a string into its individual characters."""
        return list(UpperCamelCase__ )
    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
        """Convert a single-character token to its id (its Unicode codepoint)."""
        try:
            return ord(UpperCamelCase__ )
        except TypeError:
            raise ValueError(f"invalid token: '{token}'" )
    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
        """Convert an id back to a token: special-codepoint name or chr(id)."""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(UpperCamelCase__ )
        except TypeError:
            raise ValueError(f"invalid id: {index}" )
    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
        """Join character tokens back into a string."""
        return "".join(UpperCamelCase__ )
    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ):
        """Build model inputs: [CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        lowerCAmelCase_ = [self.sep_token_id]
        lowerCAmelCase_ = [self.cls_token_id]
        lowerCAmelCase_ = cls + token_ids_a + sep
        if token_ids_a is not None:
            result += token_ids_a + sep
        return result
    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None, UpperCamelCase__ = False ):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCamelCase__, token_ids_a=UpperCamelCase__, already_has_special_tokens=UpperCamelCase__ )
        lowerCAmelCase_ = [1] + ([0] * len(UpperCamelCase__ )) + [1]
        if token_ids_a is not None:
            result += ([0] * len(UpperCamelCase__ )) + [1]
        return result
    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ):
        """Return token-type ids: 0 for the first segment, 1 for the second."""
        lowerCAmelCase_ = [self.sep_token_id]
        lowerCAmelCase_ = [self.cls_token_id]
        lowerCAmelCase_ = len(cls + token_ids_a + sep ) * [0]
        if token_ids_a is not None:
            result += len(token_ids_a + sep ) * [1]
        return result
    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ):
        """No vocabulary file to save -- the vocab is the Unicode codepoint space."""
        return ()
| 278 | 0 |
'''Lazy import wiring for the X-MOD model: exposes configuration classes
eagerly at type-check time and defers the heavy torch-dependent modeling
classes until first attribute access via `_LazyModule`.'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case ={
    """configuration_xmod""": [
        """XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """XmodConfig""",
        """XmodOnnxConfig""",
    ],
}
# Modeling classes are only registered when torch is importable.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __snake_case =[
        """XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """XmodForCausalLM""",
        """XmodForMaskedLM""",
        """XmodForMultipleChoice""",
        """XmodForQuestionAnswering""",
        """XmodForSequenceClassification""",
        """XmodForTokenClassification""",
        """XmodModel""",
        """XmodPreTrainedModel""",
    ]
if TYPE_CHECKING:
    # Static imports so type checkers and IDEs see the real symbols.
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    # At runtime, replace this module with a lazy proxy.
    # NOTE(review): mangled module -- the dict above is bound to `__snake_case`
    # but `_import_structure` is referenced here; the second `__snake_case`
    # assignment also clobbers the first.  Left byte-identical.
    import sys
    __snake_case =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 4 |
def __UpperCamelCase ( _A = 1000000 ):
lowerCAmelCase_ = 1
lowerCAmelCase_ = 1
lowerCAmelCase_ = {1: 1}
for inputa in range(2 , _A ):
lowerCAmelCase_ = 0
lowerCAmelCase_ = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
lowerCAmelCase_ = (3 * number) + 1
counter += 1
if inputa not in counters:
lowerCAmelCase_ = counter
if counter > pre_counter:
lowerCAmelCase_ = inputa
lowerCAmelCase_ = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
| 278 | 0 |
"""simple docstring"""
def lowercase ( __snake_case : Dict = 1_0_0_0_0_0_0 ):
lowercase_ : Any = 1
lowercase_ : Any = 1
lowercase_ : str = {1: 1}
for inputa in range(2 , _A ):
lowercase_ : Optional[int] = 0
lowercase_ : List[str] = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
lowercase_ : Any = (3 * number) + 1
counter += 1
if inputa not in counters:
lowercase_ : Dict = counter
if counter > pre_counter:
lowercase_ : Union[str, Any] = inputa
lowercase_ : Optional[Any] = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
| 33 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class A(unittest.TestCase):
    """Slow integration test for the DiT document-classification checkpoint."""

    @slow
    def SCREAMING_SNAKE_CASE__(self):
        """Run one RVL-CDIP sample through the model and check logits shape and values."""
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")
        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # RVL-CDIP has 16 document classes.
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4_158, -0.4_092, -0.4_347], device=torch_device, dtype=torch.float, )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1E-4))
# Pin table: maps each pip package name to its requirement specifier.
a = {
    "Pillow": "Pillow<10.0.0",
    "accelerate": "accelerate>=0.20.3",
    "av": "av==9.2.0",
    "beautifulsoup4": "beautifulsoup4",
    "black": "black~=23.1",
    "codecarbon": "codecarbon==1.2.0",
    "cookiecutter": "cookiecutter==1.7.3",
    "dataclasses": "dataclasses",
    "datasets": "datasets!=2.5.0",
    "decord": "decord==0.6.0",
    "deepspeed": "deepspeed>=0.9.3",
    "diffusers": "diffusers",
    "dill": "dill<0.3.5",
    "evaluate": "evaluate>=0.2.0",
    "fairscale": "fairscale>0.3",
    "faiss-cpu": "faiss-cpu",
    "fastapi": "fastapi",
    "filelock": "filelock",
    "flax": "flax>=0.4.1,<=0.7.0",
    "ftfy": "ftfy",
    "fugashi": "fugashi>=1.0",
    "GitPython": "GitPython<3.1.19",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
    "importlib_metadata": "importlib_metadata",
    "ipadic": "ipadic>=1.0.0,<2.0",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
    "jaxlib": "jaxlib>=0.1.65,<=0.4.13",
    "jieba": "jieba",
    "kenlm": "kenlm",
    "keras-nlp": "keras-nlp>=0.3.1",
    "librosa": "librosa",
    "nltk": "nltk",
    "natten": "natten>=0.14.6",
    "numpy": "numpy>=1.17",
    "onnxconverter-common": "onnxconverter-common",
    "onnxruntime-tools": "onnxruntime-tools>=1.4.2",
    "onnxruntime": "onnxruntime>=1.4.0",
    "opencv-python": "opencv-python",
    "optuna": "optuna",
    "optax": "optax>=0.0.8,<=0.1.4",
    "packaging": "packaging>=20.0",
    "parameterized": "parameterized",
    "phonemizer": "phonemizer",
    "protobuf": "protobuf",
    "psutil": "psutil",
    "pyyaml": "pyyaml>=5.1",
    "pydantic": "pydantic<2",
    "pytest": "pytest>=7.2.0",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "python": "python>=3.8.0",
    "ray[tune]": "ray[tune]",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "rhoknp": "rhoknp>=1.1.0,<1.3.1",
    "rjieba": "rjieba",
    "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
    "ruff": "ruff>=0.0.241,<=0.0.259",
    "sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
    "sacremoses": "sacremoses",
    "safetensors": "safetensors>=0.3.1",
    "sagemaker": "sagemaker>=2.31.0",
    "scikit-learn": "scikit-learn",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "sigopt": "sigopt",
    "starlette": "starlette",
    "sudachipy": "sudachipy>=0.6.6",
    "sudachidict_core": "sudachidict_core>=20220729",
    "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
    "tensorflow": "tensorflow>=2.6,<2.14",
    "tensorflow-text": "tensorflow-text<2.14",
    "tf2onnx": "tf2onnx",
    "timeout-decorator": "timeout-decorator",
    "timm": "timm",
    "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
    "torch": "torch>=1.9,!=1.12.0",
    "torchaudio": "torchaudio",
    "torchvision": "torchvision",
    "pyctcdecode": "pyctcdecode>=0.4.0",
    "tqdm": "tqdm>=4.27",
    "unidic": "unidic>=1.0.2",
    "unidic_lite": "unidic_lite>=1.0.7",
    "urllib3": "urllib3<2.0.0",
    "uvicorn": "uvicorn",
}
| 73 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def get_flax_param(_A):
    """Load a T5X checkpoint from `_A` and return its params as a flat {tuple-key: array} dict."""
    flax_params = checkpoints.load_tax_checkpoint(_A)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(_A):
    """Rename flattened T5X/Flax parameter keys to HF Pix2Struct names and convert
    the arrays to torch tensors (transposing everything except embedding tables).
    """
    converted_dict = {}
    conversion_mapping = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }
    decoder_conversion_mapping = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in _A.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in conversion_mapping.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in decoder_conversion_mapping.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = _A[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            # Flax stores linear weights transposed relative to torch.
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pixastruct_original_pytorch_checkpoint_to_hf(t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False):
    """Convert a T5X Pix2Struct checkpoint to HF format and save model + processor.

    `use_large` selects the large architecture hyperparameters; `is_vqa` marks
    the checkpoint as a VQA model in the config.
    """
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = PixaStructVisionConfig()
        decoder_config = PixaStructTextConfig()
    else:
        encoder_config = PixaStructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18)
        decoder_config = PixaStructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = PixaStructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa)

    model = PixaStructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = PixaStructImageProcessor()
    processor = PixaStructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        # NOTE(review): attribute targets reconstructed from the upstream conversion
        # script (max_patches=4096 for large, is_vqa flag on the image processor) — confirm.
        processor.image_processor.max_patches = 4096
    processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''')
parser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''')
_A = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 278 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
# Module logger.
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# NOTE(review): this rebinding overwrites the logger above — the two constants
# presumably had distinct names (logger / pretrained-config archive map) before
# an automated rename; verify against upstream.
# Map from model id on the Hub to the URL of its config.json.
_SCREAMING_SNAKE_CASE = {
    'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
    'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
    'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
    'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
    'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
    'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class a(PretrainedConfig):
    """Configuration for BLOOM models (vocab size, depth, width, dropout, etc.)."""

    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(self, vocab_size=25_08_80, hidden_size=64, n_layer=2, n_head=8, layer_norm_epsilon=1E-5, initializer_range=0.02, use_cache=True, bos_token_id=1, eos_token_id=2, apply_residual_connection_post_layernorm=False, hidden_dropout=0.0, attention_dropout=0.0, pretraining_tp=1, slow_but_exact=False, **kwargs, ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class a(OnnxConfigWithPast):
    """ONNX export configuration for BLOOM (with past-key-values support)."""

    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(self, config, task="default", patching_specs=None, use_past=False, ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self):
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self):
        return self._config.n_layer

    @property
    def num_attention_heads(self):
        return self._config.n_head

    @property
    def atol_for_validation(self):
        return 1E-3

    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None, ):
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)

        return ordered_inputs

    @property
    def default_onnx_opset(self):
        return 13
| 180 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
_A = logging.get_logger(__name__)
class A(BeitImageProcessor):
    """Deprecated alias kept for backward compatibility; use BeitImageProcessor instead."""

    def __init__(self, *args, **kwargs):
        # Warn once at construction time that this class will be removed.
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class lowercase(ModelMixin, ConfigMixin):
    """Holds a learned mean/std pair used to normalize and denormalize image embeddings."""

    @register_to_config
    def __init__(self, embedding_dim=7_6_8, ):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    # NOTE(review): method names (to/scale/unscale) reconstructed from the upstream
    # diffusers implementation — confirm against callers.
    def to(self, torch_device=None, torch_dtype=None, ):
        # Re-wrap as Parameters so the moved tensors stay registered on the module.
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
| 101 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def get_yolos_config(yolos_name):
    """Build a YolosConfig matching the named checkpoint variant, with COCO labels."""
    config = YolosConfig()

    # size of the architecture
    # NOTE(review): attribute targets reconstructed from the upstream conversion script — confirm.
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    # set label information
    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}

    return config
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused timm qkv projection in `state_dict` into separate
    query/key/value entries under the HF YOLOS naming scheme (in place).
    """
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name):
    """Translate a timm YOLOS parameter name to its HF YOLOS equivalent."""
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")
    return name
def convert_state_dict(orig_state_dict, model):
    """Rewrite every key of a timm YOLOS state dict to HF naming, splitting
    fused qkv projections into separate query/key/value tensors.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            # Non-qkv keys are renamed via the shared key-translation helper.
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img():
    """Download the standard COCO validation image (two cats) used for output checks."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Convert an original YOLOS checkpoint to HF format, verify its outputs on a
    reference image, save model + image processor, and optionally push to the Hub.
    """
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]])
        expected_slice_boxes = torch.tensor(
            [[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]])
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]])
        expected_slice_boxes = torch.tensor(
            [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]])
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]])
        expected_slice_boxes = torch.tensor(
            [[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]])
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]])
        expected_slice_boxes = torch.tensor(
            [[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]])
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]])
        expected_slice_boxes = torch.tensor(
            [[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]])
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1E-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1E-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_A = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 278 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE_ = TypeVar("""T""")
SCREAMING_SNAKE_CASE_ = TypeVar("""U""")
class DoubleLinkedListNode(Generic[T, U]):
    """Node of a doubly linked list, holding a key/value pair."""

    def __init__(self, key, val):
        self.key = key
        self.val = val
        # Links are wired up by DoubleLinkedList.add/remove.
        self.next = None
        self.prev = None

    def __repr__(self):
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )
class DoubleLinkedList(Generic[T, U]):
    """Doubly linked list with sentinel head/rear nodes; new nodes go before rear."""

    def __init__(self):
        # Sentinels never hold data; real nodes live between them.
        self.head = DoubleLinkedListNode(None, None)
        self.rear = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self):
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n ".join(rep)

    def add(self, node):
        """Insert `node` just before the rear sentinel (most-recently-used end)."""
        previous = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node):
        """Unlink `node` and return it, or return None if it is not linked."""
        if node.prev is None or node.next is None:
            return None

        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache(Generic[T, U]):
    """Least-recently-used cache backed by a dict plus a doubly linked list
    ordering entries from oldest (head side) to newest (rear side).
    """

    # Shared map from decorated function -> its LRUCache instance.
    decorator_function_to_instance_map = {}

    def __init__(self, capacity):
        self.list = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache = {}

    def __repr__(self):
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key):
        return key in self.cache

    def get(self, key):
        """Return the cached value for `key` (promoting it to newest), or None."""
        if key in self.cache:
            self.hits += 1
            value_node = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key, value):
        """Insert or update `key`, evicting the oldest entry when over capacity."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list assert node.key is not None

                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1

        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size=128):
        """Decorator factory memoizing single-argument functions with an LRU cache."""

        def cache_decorator_inner(func):
            def cache_decorator_wrapper(*args):
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info():
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 296 |
def __UpperCamelCase ( _A ):
if not numbers:
return 0
if not isinstance(_A , (list, tuple) ) or not all(
isinstance(_A , _A ) for number in numbers ):
raise ValueError('''numbers must be an iterable of integers''' )
lowerCAmelCase_ = lowerCAmelCase_ = lowerCAmelCase_ = numbers[0]
for i in range(1 , len(_A ) ):
# update the maximum and minimum subarray products
lowerCAmelCase_ = numbers[i]
if number < 0:
lowerCAmelCase_ , lowerCAmelCase_ = min_till_now, max_till_now
lowerCAmelCase_ = max(_A , max_till_now * number )
lowerCAmelCase_ = min(_A , min_till_now * number )
# update the maximum product found till now
lowerCAmelCase_ = max(_A , _A )
return max_prod
| 278 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : int = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Tuple = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 332 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __UpperCamelCase(model_name):
    """Build a FocalNetConfig matching the official checkpoint named *model_name*.

    The size variant (tiny/small/base/large/xlarge/huge) and the focal-level
    suffixes (fl3/fl4, lrf/srf) in the name select the architecture hyperparameters.
    Label maps are downloaded from the `huggingface/label-files` dataset repo.

    Fix: the original collapsed every local into one reused name and passed the
    model name string as *every* config kwarg, producing an unusable config.
    """
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = "large" in model_name or "huge" in model_name
    use_post_layernorm = "large" in model_name or "huge" in model_name
    use_layerscale = "large" in model_name or "huge" in model_name

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        # "lrf" (large receptive field) variants use one extra focal level
        focal_levels = [3, 3, 3, 3] if "lrf" in model_name else [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=idalabel,
        label2id=labelaid,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def __UpperCamelCase(name):
    """Map an original FocalNet state-dict key to the transformers naming scheme.

    Fix: the original assigned every `.replace()` result to a throwaway local,
    so no rename ever took effect; each replacement must rebind *name*.
    """
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        # classification head lives outside the `focalnet` backbone prefix
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def __UpperCamelCase(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Download an official FocalNet checkpoint, convert it, and verify the outputs.

    Args:
        model_name: one of the keys of the URL table below.
        pytorch_dump_folder_path: where to save the converted model/processor (or None).
        push_to_hub: whether to push the converted artifacts to the Hub.

    Fix: the original declared three parameters all named `_A` (a SyntaxError),
    dropped the renamed keys in the state-dict loop, and passed the model name
    string as every processor argument.
    """
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        # NOTE(review): `rename_key` is the key-renaming helper defined above
        # (its def is name-mangled to `__UpperCamelCase` in this file) — confirm
        # the intended binding before running.
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion against the standard torchvision preprocessing pipeline
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
_A = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 278 | 0 |
"""simple docstring"""
import os
from collections.abc import Iterator
def UpperCamelCase_(lowerCAmelCase__: str = ".") -> Iterator[str]:
    """Yield paths of .py/.ipynb files under *lowerCAmelCase__* (the top directory).

    Skips `__init__.py`, anything under a `scripts` directory, and hidden or
    underscore-prefixed directories.

    Fix: the original walked a global placeholder instead of its parameter, and
    assigned the pruned directory list to a throwaway local — pruning only works
    when `dir_names` is mutated in place (slice assignment), which `os.walk` honors.
    """
    for dir_path, dir_names, filenames in os.walk(lowerCAmelCase__):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")
def UpperCamelCase_(i: int) -> str:
    """Return the Markdown prefix for nesting level *i*: a `##` heading at level 0,
    otherwise an indented bullet.

    Fix: the body referenced `i` while the parameter had a different name,
    raising NameError on every call.
    """
    return f"{i * '  '}*" if i else "\n##"
def UpperCamelCase_(old_path: str, new_path: str) -> str:
    """Print Markdown headings for each path component of *new_path* that differs
    from *old_path*, then return *new_path* (the caller's new "previous path").

    Fix: the original declared both parameters with the same name (a SyntaxError)
    and never bound `old_parts`; the body's references to `old_path`/`new_path`/
    `old_parts` ground the reconstruction.
    """
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            # `md_prefix` is the heading/bullet helper defined above
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path
def UpperCamelCase_(lowerCAmelCase__: str = ".") -> None:
    """Print a Markdown index of all good files under *lowerCAmelCase__*.

    Fixes: the original passed a global placeholder to `good_file_paths`,
    discarded `print_path`'s return value (so directory headings repeated), and
    contained `(unknown)` metadata artifacts where the filename belonged in the
    link text and URL.
    """
    old_path = ""
    for filepath in sorted(good_file_paths(lowerCAmelCase__)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md(""".""")
| 224 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def __UpperCamelCase(state_dict):
    """Remove fairseq-only keys from *state_dict* in place (no-op for absent keys).

    Fix: the original called `state_dict.pop(_A, _A)` — popping the dict itself
    as a key — instead of popping each ignore key with a `None` default.
    """
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def __UpperCamelCase(emb):
    """Build a bias-free nn.Linear that shares its weight with embedding *emb*
    (the standard fairseq->transformers output-projection tying trick).

    Fix: the original read an undefined `emb` name and passed the embedding
    module itself as every `nn.Linear` argument.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # rebind the parameter data so the projection shares the embedding weights
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def __UpperCamelCase(_A):
    """Load a fairseq XGLM checkpoint from path *_A* and return an XGLMForCausalLM.

    Fix: the original bound every intermediate to one throwaway local and passed
    the checkpoint path as every `XGLMConfig` kwarg; the body's references to
    `checkpoint`, `args`, `state_dict` and `model` ground the reconstruction.
    """
    checkpoint = torch.load(_A, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    # NOTE(review): the ignore-key stripper above is name-mangled to
    # `__UpperCamelCase` in this file — confirm this name resolves.
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    # fairseq prefixes decoder weights with "decoder"; transformers uses "model"
    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    # tie the LM head to the input embeddings
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
_A = parser.parse_args()
_A = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 278 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase_(DiffusionPipeline):
    """Unconditional image-generation pipeline driven by a variance-exploding
    SDE (ScoreSdeVe) scheduler: predictor-corrector sampling over a UNet score model.

    Fixes: the original declared duplicate parameter names in both `__init__` and
    `__call__` (SyntaxErrors), inherited from an undefined base name instead of
    `DiffusionPipeline` (imported above), and replaced the component annotations
    with `42` placeholders.
    """

    # components registered by __init__
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        num_inference_steps=2000,
        generator=None,
        output_type="pil",
        return_dict=True,
        **kwargs,
    ):
        """Sample *batch_size* images; returns ImagePipelineOutput (or a tuple)."""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step (Langevin corrector, `correct_steps` iterations)
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step (reverse-SDE predictor)
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        # report the noise-free mean of the final step, clamped to image range
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
| 319 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
# Fix: every assignment in this script went to one placeholder name while the
# use-sites kept the real names (vocab, merges, build_dir, tokenizer, ...), so
# nothing resolved; names restored from the use-sites. The tokenizer is now
# built INSIDE the TemporaryDirectory block, since it reads the vocab/merges
# files in __init__ and the directory is deleted on exit.
mname_tiny = "tiny-wmt19-en-ru"

# Build
# borrowed from a test
vocab = [
    "l",
    "o",
    "w",
    "e",
    "r",
    "s",
    "t",
    "i",
    "d",
    "n",
    "w</w>",
    "r</w>",
    "t</w>",
    "lo",
    "low",
    "er</w>",
    "low</w>",
    "lowest</w>",
    "newer</w>",
    "wider</w>",
    "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 278 | 0 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    """Config tester for Cvt: checks the Cvt-specific attributes exist.

    Fixes: base/class names restored from the `ConfigTester` import above and
    the `TFCvtConfigTester(...)` call site in the model test's setUp; the
    `hasattr` target was an undefined placeholder instead of the built config.
    Method name restored from the `create_and_test_config_common_properties()`
    call in `test_config`.
    """

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class TFCvtModelTester:
    """Builds tiny Cvt configs/inputs and runs shape checks for the TF Cvt tests.

    Fixes: `__init__` declared every parameter with the same name (a SyntaxError)
    and all four helper methods were named `_snake_case`, so only the last
    survived; parameter and method names restored from the attribute assignments
    and the call sites (`self.get_config()`, `prepare_config_and_inputs`, etc.).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        # NOTE(review): the mutable list defaults mirror the upstream helper and
        # are treated as read-only here.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        # each stage downsamples per its patch size/stride/padding (conv output formula)
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for TF Cvt.

    Fixes: all test methods were named `_snake_case`, so they shadowed each other
    and none ran under unittest discovery; the nested hidden-states checker
    declared duplicate parameters (a SyntaxError); class attributes and mixin
    bases were placeholder names. Names restored from the imported mixins and
    the model-tester call sites; individual skip-method names are the reviewer's
    reconstruction of the upstream test file — confirm against it.
    """

    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    def test_dataset_conversion(self):
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy("mixed_float16")
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        # restore the default policy so later tests are unaffected
        tf.keras.mixed_precision.set_global_policy("float32")

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration test.

    Fixes: the original returned an undefined `image` name, and the def was
    mangled while the call site below uses `prepare_img()` — name restored to
    match the caller.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    """Slow integration test: run the pretrained TF Cvt classifier on a real image.

    Fixes: the cached property and the test method shared the name `_snake_case`
    (the method shadowed the property, breaking `self.default_image_processor`),
    and processor/model call arguments were undefined placeholders. The property
    name is grounded by its use below; the test method name is the reviewer's
    reconstruction (it must start with `test_` to run) — confirm upstream.
    """

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([0.9_285, 0.9_015, -0.3_150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 177 |
import argparse
from collections import defaultdict
import yaml
_A = '''docs/source/en/_toctree.yml'''
def __UpperCamelCase(model_doc):
    """De-duplicate and sort a model-doc table-of-contents section.

    Entries sharing the same `local` key are merged (their `title`s must agree),
    then the section is sorted case-insensitively by title.

    Fixes: the counter was built with `defaultdict(model_doc)` — a list is not a
    valid default factory — instead of `defaultdict(int)`, and the sort key
    lambda referenced an undefined name.

    Raises:
        ValueError: if one `local` key appears with conflicting titles.
    """
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
def __UpperCamelCase(overwrite=False):
    """Check (and optionally fix) the sorting of the Models section of the docs TOC.

    Args:
        overwrite: if True, rewrite the TOC file in place when it is mis-sorted;
            otherwise raise ValueError on any inconsistency.

    Fixes: the original opened its *boolean* parameter as a file path (the TOC
    path lived in a shadowed placeholder global), and discarded both the cleaned
    sections and the `diff` flag, so the check was inert.
    """
    toc_path = "docs/source/en/_toctree.yml"  # same path as the module-level constant above
    with open(toc_path, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    # Only the modality subsections (those that themselves have "sections") are checked
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        # NOTE(review): `clean_model_doc_toc` is the de-duplicating sorter defined
        # above (its def is name-mangled in this file) — confirm it resolves.
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(toc_path, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
_A = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 278 | 0 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
# Make torch/CUDA ops deterministic so the pipeline tests below are reproducible.
enable_full_determinism()
class __snake_case ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Fast tests for ``StableDiffusionControlNetImgaImgPipeline`` with a single ControlNet."""

    # Attribute names follow the PipelineTesterMixin contract (it reads
    # ``pipeline_class``, ``params``, ``batch_params``, ``image_params``, ...).
    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build tiny randomly-initialized sub-models for a fast CPU test pipeline."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            # Standard Stable Diffusion DDIM settings — original keyword values were
            # mangled away; assumed False — TODO confirm against upstream test.
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return pipeline call kwargs: prompt, init image and control image."""
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        # Original code used ``np.uinta``, which does not exist in NumPy; ``np.uint8`` is the fix.
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class __snake_case ( PipelineTesterMixin , PipelineKarrasSchedulerTesterMixin , unittest.TestCase ):
    """Fast tests for the img2img ControlNet pipeline driven by a ``MultiControlNetModel``."""

    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        """Build tiny sub-models, including two ControlNets wrapped in a MultiControlNetModel."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            # Randomize the ControlNet zero-convolutions so each ControlNet has a
            # distinct, non-zero effect on the output.
            # Original code checked ``torch.nn.Convad``, which is not a real torch
            # attribute; ``torch.nn.Conv2d`` is the intended class.
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        controlnet = MultiControlNetModel([controlnet1, controlnet2])
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return pipeline call kwargs with one control image per ControlNet."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        # ``np.uinta`` does not exist in NumPy; ``np.uint8`` is the fix.
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_control_guidance_switch(self):
        """Different control_guidance_start/end settings must produce different images."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        # assumes the 10.0 override targets ``controlnet_conditioning_scale`` —
        # TODO confirm against the upstream diffusers test.
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
    """Slow GPU integration test against the released sd-controlnet-canny checkpoint."""

    def tearDown(self):
        super().tearDown()
        # Release GPU memory between tests.
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))
        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )
        # Loose tolerance: sampling is only reproducible up to hardware differences.
        assert np.abs(expected_image - image).max() < 9e-2
| 219 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class A ( SchedulerCommonTest ):
    """Unit tests for ``UnCLIPScheduler``."""

    # SchedulerCommonTest reads ``scheduler_classes``; the original assigned this
    # tuple to an unrelated (mangled) name while the methods read it back here.
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Return the default scheduler config, with any ``kwargs`` overriding it."""
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        # prev_timestep must precede time_step; skip invalid combinations.
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)
        predicted_variance = 0.5
        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        """Run the full denoising loop and pin the output statistics."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        """Run a shortened (25-step) loop, passing explicit prev_timestep values."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(25)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        # Intentionally disabled inherited common test — TODO confirm original method name.
        pass

    def test_add_noise_device(self):
        # Intentionally disabled inherited common test — TODO confirm original method name.
        pass
| 278 | 0 |
'''simple docstring'''
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name, arrival_time, burst_time, no_of_process):
    """Compute per-process turnaround times under Highest Response Ratio Next (HRRN).

    Args:
        process_name: process labels (a reordered local copy is made by arrival time).
        arrival_time: arrival time of each process; NOTE: sorted IN PLACE.
        burst_time: CPU burst length of each process.
        no_of_process: number of processes.

    Returns:
        Turnaround time of each process, indexed in arrival order.
    """
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        # Find the first unfinished process; if the CPU is idle, advance the clock
        # to that process's arrival time.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        temp = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        response_ratio = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                # Response ratio = (waiting time + burst time) / burst time
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time


# Backward-compatible alias for the original (mangled, unusable) name.
a_ = calculate_turn_around_time
def calculate_waiting_time(process_name, turn_around_time, burst_time, no_of_process):
    """Derive per-process waiting time: turnaround time minus burst time.

    ``process_name`` is accepted for signature symmetry with
    ``calculate_turn_around_time`` but is not used in the computation.
    """
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


# Backward-compatible alias for the original (mangled, unusable) name.
a_ = calculate_waiting_time
if __name__ == "__main__":
    # Demo workload: five processes with staggered arrivals and matching burst times.
    # NOTE(review): every assignment below targets ``__snake_case`` while the reads use
    # ``no_of_process``/``process_name``/``arrival_time``/``burst_time``/
    # ``turn_around_time``/``waiting_time``, and the called functions are defined above
    # under different names — as written this raises NameError; confirm intended names.
    __snake_case =5
    __snake_case =["""A""", """B""", """C""", """D""", """E"""]
    __snake_case =[1, 2, 3, 4, 5]
    __snake_case =[1, 2, 3, 4, 5]
    __snake_case =calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    __snake_case =calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
    # Print a per-process table followed by the averages.
    print("""Process name \tArrival time \tBurst time \tTurn around time \tWaiting time""")
    for i in range(0, no_of_process):
        print(
            F'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
            F'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
        )
    print(F'''average waiting time : {mean(waiting_time):.5f}''')
    print(F'''average turn around time : {mean(turn_around_time):.5f}''')
| 4 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
# Module-level setup for the conversion script: verbose logging and a CPU device.
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
# NOTE(review): both the logger and the device are assigned to the same name ``_A``,
# so the logger is immediately overwritten; confirm the intended distinct names.
_A = torch.device('''cpu''')
def prepare_img():
    """Download the COCO "two cats" test image commonly used to sanity-check vision models."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # ``stream=True`` keeps the response body as a file-like object for PIL to read.
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    """Return the first five reference logits for each released SwiftFormer checkpoint.

    Returns ``None`` for an unrecognized name (preserving the original fall-through).
    """
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    """Move the value stored under key ``old`` to key ``new`` in ``dct`` (in place)."""
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    """Map original SwiftFormer state-dict keys to their HuggingFace equivalents.

    Returns a list of ``(old_key, new_key)`` tuples, one per key in ``state_dict``.
    """
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                # "network.<stage>.<block>.<rest>" -> encoder stage/block layout.
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    """Convert an original SwiftFormer checkpoint to the HuggingFace format.

    Loads the original weights, renames keys, verifies the first five logits
    against the published reference values, then saves the HF model.
    """
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    expected_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], expected_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point for the SwiftFormer checkpoint conversion.
    # NOTE(review): the parser and parsed namespace are both assigned to ``_A`` but
    # read back as ``parser`` and ``args`` — confirm the intended variable names.
    _A = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--swiftformer_name''',
        default='''swiftformer_xs''',
        choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
        type=str,
        help='''Name of the SwiftFormer model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default='''./converted_outputs/''',
        type=str,
        help='''Path to the output PyTorch model directory.''',
    )
    parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
    _A = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 278 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)

# File names the tokenizer expects inside a checkpoint directory.
lowerCAmelCase_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

# Download URLs for the vocab/tokenizer files of each published DistilBERT checkpoint.
lowerCAmelCase_ = {
    '''vocab_file''': {
        '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
        '''distilbert-base-uncased-distilled-squad''': (
            '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
        ),
        '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
        '''distilbert-base-cased-distilled-squad''': (
            '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
        ),
        '''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
        '''distilbert-base-multilingual-cased''': (
            '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
        ),
    },
    '''tokenizer_file''': {
        '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
        '''distilbert-base-uncased-distilled-squad''': (
            '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
        ),
        '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
        '''distilbert-base-cased-distilled-squad''': (
            '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
        ),
        '''distilbert-base-german-cased''': (
            '''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
        ),
        '''distilbert-base-multilingual-cased''': (
            '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
        ),
    },
}

# Maximum sequence length supported by each checkpoint's position embeddings.
lowerCAmelCase_ = {
    '''distilbert-base-uncased''': 5_1_2,
    '''distilbert-base-uncased-distilled-squad''': 5_1_2,
    '''distilbert-base-cased''': 5_1_2,
    '''distilbert-base-cased-distilled-squad''': 5_1_2,
    '''distilbert-base-german-cased''': 5_1_2,
    '''distilbert-base-multilingual-cased''': 5_1_2,
}

# Per-checkpoint tokenizer init overrides (lower-casing behavior).
lowerCAmelCase_ = {
    '''distilbert-base-uncased''': {'''do_lower_case''': True},
    '''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
    '''distilbert-base-cased''': {'''do_lower_case''': False},
    '''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
    '''distilbert-base-german-cased''': {'''do_lower_case''': False},
    '''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class __lowerCAmelCase ( PreTrainedTokenizerFast ):
    r"""
    A "fast" DistilBERT tokenizer backed by the HuggingFace *tokenizers* library.

    The class attribute names follow the ``PreTrainedTokenizerFast`` contract
    (the base class reads ``vocab_files_names``, ``slow_tokenizer_class``, etc.).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Re-sync the backend normalizer if its serialized options disagree with the
        # arguments passed here (e.g. loading a cased tokenizer file as uncased).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('''lowercase''', do_lower_case) != do_lower_case
            or normalizer_state.get('''strip_accents''', strip_accents) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('''type'''))
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None):
        """Build ``[CLS] A [SEP]`` (or ``[CLS] A [SEP] B [SEP]`` for a pair)."""
        # Original assigned the first segment to a throwaway name and then read an
        # undefined ``output``; the accumulator must be the same variable throughout.
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a_a:
            output += token_ids_a_a + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_a=None) -> List[int]:
        """Return token-type ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_a + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Save the backend tokenizer's vocabulary files; return the saved file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        # Original returned the *directory* argument; the saved file paths are the
        # documented return value of ``save_vocabulary``.
        return tuple(files)
| 279 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
# Map human-readable resample names to PIL filters. Pillow >= 9.1.0 moved the
# filter constants into the ``PIL.Image.Resampling`` enum; older releases expose
# them directly on ``PIL.Image``.
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
    lowerCAmelCase_ = {
        '''linear''': PIL.Image.Resampling.BILINEAR,
        '''bilinear''': PIL.Image.Resampling.BILINEAR,
        '''bicubic''': PIL.Image.Resampling.BICUBIC,
        '''lanczos''': PIL.Image.Resampling.LANCZOS,
        '''nearest''': PIL.Image.Resampling.NEAREST,
    }
else:
    lowerCAmelCase_ = {
        '''linear''': PIL.Image.LINEAR,
        '''bilinear''': PIL.Image.BILINEAR,
        '''bicubic''': PIL.Image.BICUBIC,
        '''lanczos''': PIL.Image.LANCZOS,
        '''nearest''': PIL.Image.NEAREST,
    }
def pt_to_pil(images):
    """Convert a torch image batch in [-1, 1] (NCHW) to a list of PIL images.

    Renamed from a mangled shared name; ``pt_to_pil`` follows the diffusers
    convention for this helper — TODO confirm against the original module.
    """
    # Shift from [-1, 1] to [0, 1] and clamp before handing off to numpy_to_pil.
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images
def numpy_to_pil(images):
    """Convert a numpy image batch in [0, 1] (NHWC, or HWC for one image) to PIL images.

    Named ``numpy_to_pil`` because the sibling tensor-conversion helper above calls
    it by this name; the original mangled definition shadowed that call target.
    """
    if images.ndim == 3:
        # Promote a single image to a one-element batch.
        images = images[None, ...]
    images = (images * 255).round().astype('''uint8''')
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode='''L''') for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
| 279 | 1 |
from __future__ import annotations
def p_series(nth_term, power) -> list[str]:
    """Return the first ``nth_term`` terms of the P-series 1 + 1/2^p + 1/3^p + ...

    Each term is formatted as a string (``"1"``, ``"1 / 4"``, ...). An empty-string
    ``nth_term`` yields ``[""]``, mirroring the interactive-input behavior below.
    """
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f'''1 / {pow(temp + 1, int(power))}''' if series else '''1''')
    return series
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): both inputs are assigned to ``lowerCAmelCase_`` but read back as
    # ``nth_term`` and ``power`` in the call below — as written this raises NameError;
    # confirm the intended variable names.
    lowerCAmelCase_ = int(input('''Enter the last number (nth term) of the P-Series'''))
    lowerCAmelCase_ = int(input('''Enter the power for P-Series'''))
    print('''Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p''')
    print(p_series(nth_term, power))
| 279 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenization tests for ``BioGptTokenizer``."""

    # TokenizerTesterMixin reads these class attributes.
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''w</w>''',
            '''r</w>''',
            '''t</w>''',
            '''lo''',
            '''low''',
            '''er</w>''',
            '''low</w>''',
            '''lowest</w>''',
            '''newer</w>''',
            '''wider</w>''',
            '''<unk>''',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])
        # Write the tiny fixture vocabulary/merges so the tokenizer can load them.
        with open(self.vocab_file, '''w''') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, '''w''') as fp:
            fp.write('''\n'''.join(merges))

    def get_input_output_texts(self, tokenizer):
        """Return a (raw text, expected detokenized text) pair for the common tests."""
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text

    def test_full_tokenizer(self):
        """BPE-tokenize a word and map the tokens (plus <unk>) to vocabulary ids."""
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = '''lower'''
        bpe_tokens = ['''low''', '''er</w>''']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ['''<unk>''']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        """Special-token layout: BioGPT prepends </s> (id 2) to each sequence."""
        tokenizer = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
        text = tokenizer.encode('''sequence builders''', add_special_tokens=False)
        text_a = tokenizer.encode('''multi-sequence build''', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a)
| 279 | 1 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
# Maps pip-style comparison operators (as they appear in requirement strings
# such as "tokenizers>=0.11.1") to their callable implementations.
# Bug fix: this table was bound to a throwaway name while both version helpers
# below read it as ``ops``.
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
def lowerCamelCase_(op, got_ver, want_ver, requirement, pkg, hint) -> None:
    """Compare two version strings using the pip-style operator ``op``.

    :param op: one of the keys of the module-level ``ops`` table
    :param got_ver: installed version string (or None if unknown)
    :param want_ver: required version string (or None if unknown)
    :param requirement: the full requirement string, for error messages
    :param pkg: package name, for error messages
    :param hint: extra text appended to the ImportError message
    :raises ValueError: when either version string is missing
    :raises ImportError: when the installed version violates the requirement
    """
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    # ``ops`` and ``version`` (packaging.version) are module-level names.
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


# Descriptive alias used by the public require_version helper below.
_compare_versions = lowerCamelCase_
def lowerCamelCase_(requirement, hint=None) -> None:
    """Check that the pip package ``requirement`` is installed and satisfies any
    version constraint (e.g. "tokenizers>=0.11.1" or just "tokenizers").

    :param requirement: pip-style requirement string; the special package name
        "python" checks the running interpreter version instead
    :param hint: suggestion appended to error messages (e.g. how to install)
    :raises ValueError: if the requirement string is malformed
    :raises importlib.metadata.PackageNotFoundError: if the distribution is missing
    :raises ImportError: if an installed version violates the constraint
    """
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check: a bare package name has no operator/version part
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case: "python" compares against the running interpreter version
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


# Descriptive alias matching the upstream public API name.
require_version = lowerCamelCase_
def lowerCamelCase_(requirement) -> None:
    """``require_version`` wrapper whose error hint targets transformers developers.

    :param requirement: pip-style requirement string to verify
    """
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    # Bug fix: pass the hint as the second argument instead of duplicating the
    # requirement string.
    return require_version(requirement, hint)
| 279 |
from __future__ import annotations
def lowerCamelCase_(value, weight, capacity) -> tuple[float, list[float]]:
    """Solve the fractional knapsack problem greedily by value/weight ratio.

    :param value: per-item values
    :param weight: per-item weights (same length as ``value``)
    :param capacity: knapsack capacity
    :return: (max_value, fractions) where fractions[i] is the fraction of item i taken
    """
    index = list(range(len(value)))
    # Per-item value density; the greedy choice is the densest item first.
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            # Item fits entirely.
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Take the largest fraction that still fits, then stop.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


# Descriptive alias matching the algorithm's conventional name.
fractional_knapsack = lowerCamelCase_
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 279 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def lowerCamelCase_(yolos_name) -> YolosConfig:
    """Build a ``YolosConfig`` matching the named YOLOS architecture variant.

    :param yolos_name: one of "yolos_ti", "yolos_s_200_pre", "yolos_s_300_pre",
        "yolos_s_dWr", "yolos_base" (matched by substring, as below)
    :return: a config with architecture sizes and COCO-detection label maps set
    """
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1_333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1_320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1_344]

    # set label information: COCO detection uses 91 classes
    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


# Descriptive alias used by the conversion entry point below.
get_yolos_config = lowerCamelCase_
def lowerCamelCase_(state_dict, config, base_model=False) -> None:
    """Split each fused timm qkv projection into separate HF query/key/value entries.

    Mutates ``state_dict`` in place: pops ``blocks.{i}.attn.qkv.*`` and writes
    ``vit.encoder.layer.{i}.attention.attention.{query,key,value}.*``.

    :param state_dict: checkpoint dict with timm-style keys
    :param config: model config providing ``num_hidden_layers`` and ``hidden_size``
    :param base_model: unused here; kept for signature compatibility with sibling scripts
    """
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


# Descriptive alias matching the upstream helper name.
read_in_q_k_v = lowerCamelCase_
def lowerCamelCase_(name) -> str:
    """Map a timm YOLOS checkpoint key to its HF ``YolosForObjectDetection`` name.

    Bug fix: each replacement is assigned back to ``name`` (the previous code
    discarded the replaced string and returned the input unchanged).
    The order of the rules matters (e.g. "mid_pos_embed" before "pos_embed",
    "attn.proj" before "attn").
    """
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")
    return name


# Descriptive alias used by convert_state_dict below.
rename_key = lowerCamelCase_
def lowerCamelCase_(orig_state_dict, model) -> dict:
    """Rename/split all checkpoint keys into the HF ``YolosForObjectDetection`` layout.

    Fused qkv matrices are split into separate query/key/value tensors, sized by
    the target model's attention head size; all other keys are renamed via the
    ``rename_key`` helper defined above.

    :param orig_state_dict: timm-style checkpoint dict (mutated in place and returned)
    :param model: the target HF model, used to read per-layer ``all_head_size``
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # keys look like "backbone.blocks.<layer>.attn.qkv.{weight,bias}"
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            # plain keys only need renaming
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict


# Descriptive alias used by the conversion entry point below.
convert_state_dict = lowerCamelCase_
def lowerCamelCase_():
    """Download the standard COCO cats test image.

    Returns a ``PIL.Image`` (the historical ``torch.Tensor`` annotation was wrong).
    Bug fix: the URL local variable is actually used, and ``stream=True`` keeps the
    response body available via ``.raw``.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


# Descriptive alias used by the conversion entry point below.
prepare_img = lowerCamelCase_
@torch.no_grad()
def lowerCamelCase_(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Convert a timm YOLOS checkpoint to HF format, verify outputs, and save it.

    NOTE(review): ``get_yolos_config``, ``convert_state_dict`` and ``prepare_img``
    are the sibling helpers defined earlier in this file.

    :param yolos_name: architecture variant name (see get_yolos_config)
    :param checkpoint_path: path to the original .pth state dict
    :param pytorch_dump_folder_path: output directory for the converted model
    :param push_to_hub: also push model + processor to the hustvl org on the hub
    :raises ValueError: for an unknown ``yolos_name``
    """
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    # Reference slices recorded from the original checkpoints.
    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")


# Descriptive alias used by the argparse entry point below.
convert_yolos_checkpoint = lowerCamelCase_
if __name__ == "__main__":
    # Bug fix: the parser and parsed args must be bound to the names used below
    # (the previous code assigned both to a throwaway variable).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--yolos_name",
        default="yolos_s_200_pre",
        type=str,
        help=(
            "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
            " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 279 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class __lowerCAmelCase(unittest.TestCase):
    """Configuration holder used by the GLPN image-processing tests below.

    Stores the knobs that parameterize random test images and exposes the
    subset of them that form the ``GLPNImageProcessor`` constructor kwargs.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        # Bug fix: the original signature repeated one mangled parameter name
        # nine times (a SyntaxError) and never stored the arguments.
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def lowerCamelCase(self) -> dict:
        """Return the image-processor constructor kwargs under test."""
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }

    # Descriptive alias matching the call sites in the test class below.
    prepare_image_processor_dict = lowerCamelCase


# Importable, descriptive alias for the tester class (callers use this name).
GLPNImageProcessingTester = __lowerCAmelCase
@require_torch
@require_vision
class __lowerCAmelCase ( _a, unittest.TestCase ):
    """Tests for ``GLPNImageProcessor``: output height/width must be multiples
    of ``size_divisor`` for PIL, numpy and torch inputs.

    NOTE(review): the mixin base (``_a``) and many local bindings were mangled
    by an automated rename; intent is annotated, code left untouched.
    """

    # Class under test (None when the vision dependencies are unavailable).
    lowerCamelCase_ : Optional[Any] = GLPNImageProcessor if is_vision_available() else None

    def lowerCamelCase (self ) -> Optional[Any]:
        '''Create the shared configuration helper for the tests below.'''
        # NOTE(review): this binding was presumably ``self.image_processor_tester``
        # before the rename — the property below reads that attribute.
        snake_case_ : str = GLPNImageProcessingTester(self )

    @property
    def lowerCamelCase (self ) -> Union[str, Any]:
        '''Constructor kwargs for the processor under test.'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowerCamelCase (self ) -> List[str]:
        '''The processor must expose the documented configuration attributes.'''
        snake_case_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__magic_name__ , '''do_resize''' ) )
        self.assertTrue(hasattr(__magic_name__ , '''size_divisor''' ) )
        self.assertTrue(hasattr(__magic_name__ , '''resample''' ) )
        self.assertTrue(hasattr(__magic_name__ , '''do_rescale''' ) )

    def lowerCamelCase (self ) -> List[Any]:
        '''Intentionally empty: GLPN has no fixed-size property to check.'''
        pass

    def lowerCamelCase (self ) -> int:
        '''PIL input: both spatial dims of the output must divide by size_divisor.'''
        snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        snake_case_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
        for image in image_inputs:
            self.assertIsInstance(__magic_name__ , Image.Image )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        snake_case_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )

    def lowerCamelCase (self ) -> Dict:
        '''numpy input: both spatial dims of the output must divide by size_divisor.'''
        snake_case_ : str = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        snake_case_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ )
        for image in image_inputs:
            self.assertIsInstance(__magic_name__ , np.ndarray )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        snake_case_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )

    def lowerCamelCase (self ) -> Optional[Any]:
        '''torch input: both spatial dims of the output must divide by size_divisor.'''
        snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
        for image in image_inputs:
            self.assertIsInstance(__magic_name__ , torch.Tensor )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        snake_case_ : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 279 | 1 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Bug fix: all four module-level constants were bound to a throwaway name while
# the tokenizer class below reads them as VOCAB_FILES_NAMES /
# PRETRAINED_VOCAB_FILES_MAP / PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, and the
# warning in __init__ uses ``logger``.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

# Maximum model input lengths (positional embedding sizes), per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2_048,
    "AI-Sweden/gpt-sw3-350m": 2_048,
    "AI-Sweden/gpt-sw3-1.6b": 2_048,
    "AI-Sweden/gpt-sw3-6.7b": 2_048,
    "AI-Sweden/gpt-sw3-20b": 2_048,
}
class __lowerCAmelCase ( _a ):
    """SentencePiece tokenizer for the GPT-SW3 models.

    NOTE(review): an automated rename mangled the base-class name (``_a``),
    local bindings (``snake_case_``) and parameter names (``__magic_name__``).
    In particular ``__init__`` repeats one mangled parameter name (a
    SyntaxError in the current state); the original intent is annotated in
    comments while the code itself is left untouched.
    """

    # File-name conventions and per-checkpoint metadata (module-level constants).
    lowerCamelCase_ : List[Any] = VOCAB_FILES_NAMES
    lowerCamelCase_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    lowerCamelCase_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCamelCase_ : Tuple = ['''input_ids''', '''attention_mask''']

    def __init__(self , __magic_name__ , __magic_name__=False , __magic_name__=False , __magic_name__=False , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__ = None , **__magic_name__ , ) -> None:
        '''Load the SentencePiece model and pick model-specific special tokens.'''
        snake_case_ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
        snake_case_ : Union[str, Any] = kwargs.get('''name_or_path''' )
        if name_or_path is None:
            logger.warning(
                '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'''
                ''' you are testing the model, this can safely be ignored''' )
            snake_case_ : Dict = '''None'''
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        snake_case_ : str = '''<|endoftext|>''' if eos_token is None else eos_token
        snake_case_ : str = '''<unk>''' if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            # The 7b checkpoint reuses unk as pad and eos as bos.
            snake_case_ : Dict = unk_token if pad_token is None else pad_token
            snake_case_ : List[str] = eos_token if bos_token is None else bos_token
        else:
            snake_case_ : Tuple = '''<pad>''' if pad_token is None else pad_token
            snake_case_ : Optional[int] = '''<s>''' if bos_token is None else bos_token
        super().__init__(
            do_lower_case=__magic_name__ , remove_space=__magic_name__ , keep_accents=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , )
        snake_case_ : Dict = do_lower_case
        snake_case_ : Optional[Any] = remove_space
        snake_case_ : Optional[Any] = keep_accents
        snake_case_ : Tuple = vocab_file
        snake_case_ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(__magic_name__ )
        # Used for whitespace normalization in input texts
        # fmt: off
        snake_case_ : List[Any] = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''​''', '''‍'''}
        # fmt: on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        snake_case_ : str = re.compile(
            F'''[{"".join(map(__magic_name__ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]''' )

    def __getstate__(self ) -> Tuple:
        '''Drop the (unpicklable) SentencePiece processor when pickling.'''
        snake_case_ : List[str] = self.__dict__.copy()
        snake_case_ : Tuple = None
        return state

    def __setstate__(self , __magic_name__ ) -> Optional[int]:
        '''Restore state and reload the SentencePiece model from disk.'''
        snake_case_ : Optional[int] = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            snake_case_ : Tuple = {}
        snake_case_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def lowerCamelCase (self ) -> int:
        '''Number of pieces in the SentencePiece model.'''
        return len(self.sp_model )

    def lowerCamelCase (self , __magic_name__ ) -> str:
        '''Strip non-printing characters, normalize whitespace, then NFC-normalize.'''
        snake_case_ : Dict = self.non_printing_characters_re.sub('''''' , __magic_name__ )
        # Normalize whitespaces
        snake_case_ : Optional[int] = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] )
        # NFC Unicode normalization
        snake_case_ : Union[str, Any] = unicodedata.normalize('''NFC''' , __magic_name__ )
        return text

    def lowerCamelCase (self , __magic_name__ , **__magic_name__ ) -> List[str]:
        '''Tokenize: preprocess the text then encode with SentencePiece.'''
        snake_case_ : Dict = self.preprocess_text(__magic_name__ )
        return self.sp_model.encode(__magic_name__ , out_type=__magic_name__ )

    def lowerCamelCase (self , __magic_name__ ) -> int:
        '''Convert a token (piece) to its vocabulary id.'''
        return self.sp_model.PieceToId(__magic_name__ )

    def lowerCamelCase (self , __magic_name__ ) -> str:
        '''Convert a vocabulary id back to its token (piece).'''
        return self.sp_model.IdToPiece(__magic_name__ )

    @staticmethod
    def lowerCamelCase (__magic_name__ ) -> str:
        '''Identity passthrough kept for API compatibility.'''
        return out_string

    def lowerCamelCase (self , __magic_name__ ) -> str:
        '''Join tokens back into a string, decoding special tokens verbatim.'''
        snake_case_ : Union[str, Any] = []
        snake_case_ : Dict = ''''''
        snake_case_ : int = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(__magic_name__ ) + token
                snake_case_ : Dict = True
                snake_case_ : List[Any] = []
            else:
                current_sub_tokens.append(__magic_name__ )
                snake_case_ : Dict = False
        out_string += self.sp_model.decode(__magic_name__ )
        return out_string

    def lowerCamelCase (self ) -> Dict[str, int]:
        '''Full token -> id vocabulary, including added tokens.'''
        snake_case_ : str = {self.convert_ids_to_tokens(__magic_name__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def lowerCamelCase (self , __magic_name__ , __magic_name__ = None ) -> Tuple[str]:
        '''Copy (or serialize) the SentencePiece model file into save_directory.'''
        if not os.path.isdir(__magic_name__ ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        snake_case_ : List[str] = os.path.join(
            __magic_name__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __magic_name__ )
        elif not os.path.isfile(self.vocab_file ):
            with open(__magic_name__ , '''wb''' ) as fi:
                snake_case_ : Union[str, Any] = self.sp_model.serialized_model_proto()
                fi.write(__magic_name__ )
        return (out_vocab_file,)

    def lowerCamelCase (self , __magic_name__ , __magic_name__ = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        '''Fast-path encode for str or list of str, optionally as a torch tensor.'''
        if isinstance(__magic_name__ , __magic_name__ ):
            snake_case_ : List[str] = self.preprocess_text(__magic_name__ )
            snake_case_ : List[str] = self.sp_model.encode(__magic_name__ )
        else:
            snake_case_ : Dict = [self.preprocess_text(__magic_name__ ) for t in text]
            snake_case_ : Union[str, Any] = self.sp_model.encode(__magic_name__ )
        if return_tensors is True or return_tensors == "pt":
            snake_case_ : List[Any] = torch.tensor(__magic_name__ )
        return token_ids

    def lowerCamelCase (self , __magic_name__ ) -> str:
        '''Fast-path decode of a list of ids via SentencePiece.'''
        return self.sp_model.decode(__magic_name__ )

    def lowerCamelCase (self , __magic_name__ ) -> List[int]:
        '''Serialize a Conversation into the "User:/Bot:" prompt format and encode it.'''
        snake_case_ : Dict = [F'''User: {text}''' if is_user else F'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
        snake_case_ : Any = (
            F'''{self.eos_token}{self.bos_token}''' + F'''{self.bos_token}'''.join(__magic_name__ ) + F'''{self.bos_token}Bot:'''
        )
        return self.encode(text=__magic_name__ )
| 279 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
    # Probe optional dependencies: the UnCLIP pipelines need torch and
    # transformers >= 4.25.0.
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to dummy placeholder objects that raise a helpful error on use.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    # Dependencies are present: expose the real implementations.
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 279 | 1 |
import os
import string
import sys
# Flag OR-ed onto arrow-key codes so they can be told apart from plain chars.
# Bug fix: these module constants were all bound to a throwaway name while the
# two input helpers below read them as ARROW_KEY_FLAG / KEYMAP / WIN_CH_BUFFER /
# WIN_KEYMAP, and the arrow_begin/arrow_end and digit entries were never stored.
ARROW_KEY_FLAG = 1 << 8

# Key name -> key code used by the terminal-input helpers below.
KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

# get_character() range-checks arrow keys against these two sentinels.
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    # Buffer of already-translated keystrokes waiting to be consumed.
    WIN_CH_BUFFER = []
    # Windows two-byte scan codes for the special keys above.
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

# Digit keys map to their own character codes.
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def lowerCamelCase_():
    """Read one raw character from stdin without echo (cross-platform).

    On Windows, two-byte scan codes are translated via WIN_KEYMAP and the
    synthesized escape sequence is buffered in WIN_CH_BUFFER; on POSIX the
    terminal is temporarily switched into raw mode.
    """
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            # Always restore the terminal settings, even if the read fails.
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


# Descriptive alias used by get_character below.
get_raw_chars = lowerCamelCase_
def lowerCamelCase_ ( ):
    """Read one keypress and decode ANSI escape sequences into KEYMAP codes.

    Returns the printable character itself for normal keys, and a KEYMAP
    value for special keys; arrow keys are returned offset by
    ``ARROW_KEY_FLAG`` so callers can distinguish them from plain characters.

    NOTE(review): ``get_raw_chars`` and ``char`` were mangled by the renaming
    pass (the raw reader above is now also called ``lowerCamelCase_``) —
    confirm against the original upstream source.
    """
    snake_case_ : str = get_raw_chars()
    if ord(_UpperCamelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        # Pass interrupt / newline straight through.
        return char
    elif ord(_UpperCamelCase ) == KEYMAP["esc"]:
        # Escape sequence: the second byte says whether a modifier follows.
        snake_case_ : Optional[int] = get_raw_chars()
        if ord(_UpperCamelCase ) == KEYMAP["mod_int"]:
            # The third byte selects the concrete arrow key.
            snake_case_ : List[str] = get_raw_chars()
            if ord(_UpperCamelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(_UpperCamelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(_UpperCamelCase ) + ARROW_KEY_FLAG )
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            # Non-printable, non-escape input maps to "undefined".
            return KEYMAP["undefined"]
| 279 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
# Sentinel reported for failed benchmark runs (originally named ``nan``;
# the renaming pass broke the later references to it).
lowerCAmelCase_ = float('''nan''')
class __lowerCAmelCase :
    """A ``sys.stdout`` proxy that also appends everything written to a file,
    stripping tqdm's carriage-return progress updates from the file copy.

    NOTE(review): the renaming pass assigned ``sys.stdout`` and the opened
    file to the throwaway local ``snake_case_`` instead of the
    ``self.stdout`` / ``self.file`` attributes the methods read — confirm
    against the original upstream source.
    """

    def __init__(self , __magic_name__ ) -> None:
        '''Remember the real stdout and open *__magic_name__* for appending.'''
        snake_case_ : List[Any] = sys.stdout
        snake_case_ : int = open(__magic_name__ , '''a''' )

    def __getattr__(self , __magic_name__ ):
        '''Delegate every other attribute lookup to the real stdout.'''
        return getattr(self.stdout , __magic_name__ )

    def lowerCamelCase (self , __magic_name__ ) -> None:
        '''Write *__magic_name__* to stdout and, tqdm-stripped, to the file.'''
        self.stdout.write(__magic_name__ )
        # strip tqdm codes: drop everything before a carriage return
        self.file.write(re.sub(R'''^.*\r''' , '''''' , __magic_name__ , 0 , re.M ) )
def lowerCamelCase_ ( max_width=80 , full_python_path=False ) -> str:
    """Reconstruct the command line this script was launched with.

    The result is shell-pasteable: critical environment variables (currently
    only ``CUDA_VISIBLE_DEVICES``) are prefixed, arguments are shell-quoted,
    and the command is wrapped with ``\\``-continuations so no line exceeds
    *max_width* characters.

    Args:
        max_width: maximum line width before inserting a continuation.
        full_python_path: if True use the full ``sys.executable`` path,
            otherwise just its basename.

    Note: the original had both parameters named ``_UpperCamelCase``
    (a SyntaxError) — restored to distinct names, positional order unchanged.
    """
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args, shell-quoted so the result can be pasted back
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to max_width lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        # flush when out of tokens or the next token would overflow the line
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def lowerCamelCase_ ( args , output_dir ) -> list:
    """Normalize ``args.base_cmd`` and return it split into an argv list.

    Unwraps ``\\``-continuations, replaces any ``--output_dir`` with
    *output_dir*, guarantees ``--overwrite_output_dir`` is present, and
    returns ``[sys.executable, *shlex.split(args.base_cmd)]``.

    Fixes two defects in the mangled original: the two parameters shared the
    same name (a SyntaxError), and every ``re.sub`` result was assigned to a
    discarded local so the substitutions had no effect.
    """
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def lowerCamelCase_ ( id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose ) -> dict:
    """Run one benchmark subprocess and return its filtered metrics.

    Executes *cmd*, saves its stdout/stderr under *output_dir* as
    ``log.<variation>.std{out,err}.txt``, and on success reads
    ``<output_dir>/all_results.json``, returning only the keys listed in
    *metric_keys*.  On a non-zero exit code returns ``{target_metric_key: nan}``.

    Fixes the mangled original's duplicated parameter names (a SyntaxError)
    and its placeholder ``subprocess.run`` keyword values / undefined ``nan``.
    """
    # Debug aid: flip to `if 1:` to fake metrics without running anything.
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([float("nan"), 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        # Report NaN so the caller averages over successful runs only.
        return {target_metric_key: float("nan")}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def lowerCamelCase_ ( id , cmd , variation_key , variation , longest_variation_len , target_metric_key , report_metric_keys , repeat_times , output_dir , verbose , ) -> dict:
    """Run one variation *repeat_times* times and return averaged metrics.

    Each repeat is delegated to ``process_run_single``; repeats whose target
    metric is NaN are marked ``✘`` and excluded from the average, successful
    ones are marked ``✓``.  Returns the per-key means (via ``fmean``) with the
    variation recorded under *variation_key*; if every repeat failed, returns
    ``{variation_key: variation, target_metric_key: nan}``.

    Fixes the mangled original's ten duplicated parameter names (a
    SyntaxError) and the lost wiring of the loop/accumulator variables.
    """
    results = []
    metrics = []
    # e.g. "3: --tf32 1          " — padded so the progress column lines up
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    # \33[2K\r erases the tqdm leftovers on the current console line
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        # average each reported metric over the successful repeats
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        # all repeats failed — report NaN for the target metric
        return {variation_key: variation, target_metric_key: float("nan")}
def lowerCamelCase_ ( ) -> str:
    """Return a multi-line report of the software/hardware environment.

    Queries the CUDA device (requires a GPU), library versions and the
    Python/platform version for inclusion in the benchmark report.

    NOTE(review): the device properties are assigned to the throwaway local
    ``snake_case_`` while the f-string reads ``properties`` — the renaming
    pass broke this wiring; confirm against the original upstream source.
    """
    # requires torch with CUDA available; raises otherwise
    snake_case_ : Any = torch.cuda.get_device_properties(torch.device('''cuda''' ) )
    return f'''
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def lowerCamelCase_ ( results , target_metric_key , report_metric_keys , base_variation , output_dir ):
    """Print the benchmark report twice: github-markdown and console formats.

    Builds a DataFrame from *results*, adds a ``diff_%`` column relative to
    *base_variation* (or, as a fallback, to the minimal target value), and
    prints the tables together with the environment and the original command
    line.

    Fixes the mangled original's duplicated parameter names (a SyntaxError),
    the lost ``df[diff_key] = ...`` assignment, and the undefined ``nan``.
    """
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = float("nan")
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != float("nan")][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible: <br> for github, \n for console
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def lowerCamelCase_ ( ):
    """CLI entry point: parse args, run every variation, print the report.

    Builds the cartesian product of ``--variations`` dimensions, runs each
    combination via ``process_run`` and hands the collected results to
    ``process_results``.  All output is tee'd into a timestamped
    ``benchmark-report-*.txt`` file.

    Fixes the mangled original, in which the parser object and almost every
    intermediate variable were assigned to throwaway names and then
    referenced through the undefined placeholder ``_UpperCamelCase``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd", default=None, type=str, required=True, help="Base cmd", )
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'", )
    parser.add_argument(
        "--base-variation", default=None, type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against", )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second", )
    parser.add_argument(
        "--report-metric-keys", default="", type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples", )
    parser.add_argument(
        "--repeat-times", default=1, type=int,
        help="How many times to re-run each variation - an average will be reported", )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked", )
    parser.add_argument(
        "--verbose", default=False, action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress", )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")
    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
    # The CLI driver was renamed to ``lowerCamelCase_`` by the renaming pass,
    # so ``main`` no longer exists; call the last-defined ``lowerCamelCase_``
    # (the benchmark entry point) instead.
    lowerCamelCase_()
| 279 | 1 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def lowerCamelCase_ ( _UpperCamelCase ):  # picklable for multiprocessing
    """Return ``_UpperCamelCase.sum()``.

    Defined at module level (rather than as a lambda) so it can be pickled
    and shipped to worker processes by ``map_nested``.

    Note: the original ``-> Optional[Any]`` annotation referenced typing
    names this module never imports, raising NameError at import time — the
    annotation is dropped.
    """
    return _UpperCamelCase.sum()
def lowerCamelCase_ ( _UpperCamelCase ) -> int:  # picklable for multiprocessing
    """Return the argument incremented by one.

    Module-level so it pickles for multiprocessing tests.  The original
    annotated the return as ``str`` although it returns an int — fixed.
    """
    return _UpperCamelCase + 1
@dataclass
class __lowerCAmelCase :
    # NOTE(review): the renaming pass collapsed the two original fields
    # (``x: int`` and ``y: str``) onto the same name, so only the second
    # annotation survives as the sole dataclass field — callers elsewhere in
    # this file construct this as ``A(x=..., y=...)``; confirm against the
    # original upstream source.
    lowerCamelCase_ : int
    lowerCamelCase_ : str
class __lowerCAmelCase ( _a ):
    """Unit tests for ``datasets.utils.py_utils`` helpers: ``map_nested``,
    ``zip_dict`` and ``temporary_assignment``.

    NOTE(review): all three test methods were renamed to the same name
    ``lowerCamelCase`` (later defs shadow earlier ones), and most fixtures /
    arguments were mangled to ``snake_case_`` / ``__magic_name__`` — confirm
    against the original upstream tests.
    """

    def lowerCamelCase (self ) -> None:
        '''Exercise ``map_nested`` over scalars, lists, dicts and nested
        dicts — sequentially, with ``num_proc``, with numpy handling, and the
        un-picklable-local-lambda failure mode.'''
        # Inputs of every supported nesting shape...
        snake_case_ : Tuple = {}
        snake_case_ : Tuple = []
        snake_case_ : Optional[Any] = 1
        snake_case_ : str = [1, 2]
        snake_case_ : str = {'''a''': 1, '''b''': 2}
        snake_case_ : int = {'''a''': [1, 2], '''b''': [3, 4]}
        snake_case_ : List[Any] = {'''a''': {'''1''': 1}, '''b''': 2}
        snake_case_ : Optional[int] = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
        # ...and the expected outputs after mapping +1 over each.
        snake_case_ : str = {}
        snake_case_ : int = []
        snake_case_ : Any = 2
        snake_case_ : Optional[Any] = [2, 3]
        snake_case_ : Tuple = {'''a''': 2, '''b''': 3}
        snake_case_ : Any = {'''a''': [2, 3], '''b''': [4, 5]}
        snake_case_ : Union[str, Any] = {'''a''': {'''1''': 2}, '''b''': 3}
        snake_case_ : Dict = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
        # Sequential map_nested over each input shape.
        self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
        self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
        self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
        self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
        self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
        self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
        self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
        self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
        # Same checks with multiprocessing enabled.
        snake_case_ : Tuple = 2
        self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
        self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
        self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
        self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
        self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
        self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
        self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
        self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
        # numpy inputs: with map_numpy the arrays themselves are mapped.
        snake_case_ : Dict = {'''a''': np.eye(2 ), '''b''': np.zeros(3 ), '''c''': np.ones(2 )}
        snake_case_ : Tuple = {'''a''': 2, '''b''': 0, '''c''': 2}
        snake_case_ : Optional[Any] = {
            '''a''': np.eye(2 ).astype(__magic_name__ ),
            '''b''': np.zeros(3 ).astype(__magic_name__ ),
            '''c''': np.ones(2 ).astype(__magic_name__ ),
        }
        self.assertEqual(map_nested(__magic_name__ , __magic_name__ , map_numpy=__magic_name__ ) , __magic_name__ )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(__magic_name__ , __magic_name__ , map_numpy=__magic_name__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        self.assertEqual(map_nested(__magic_name__ , __magic_name__ , map_numpy=__magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(__magic_name__ , __magic_name__ , map_numpy=__magic_name__ , num_proc=__magic_name__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        with self.assertRaises(__magic_name__ ):  # can't pickle a local lambda
            map_nested(lambda __magic_name__ : x + 1 , __magic_name__ , num_proc=__magic_name__ )

    def lowerCamelCase (self ) -> None:
        '''``zip_dict`` pairs the values of dicts that share keys.'''
        snake_case_ : str = {'''a''': 1, '''b''': 2}
        snake_case_ : Optional[int] = {'''a''': 3, '''b''': 4}
        snake_case_ : Dict = {'''a''': 5, '''b''': 6}
        snake_case_ : str = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(__magic_name__ , __magic_name__ , __magic_name__ ) ) , __magic_name__ )

    def lowerCamelCase (self ) -> None:
        '''``temporary_assignment`` swaps an attribute inside the with-block
        and restores the original value afterwards.'''

        class __lowerCAmelCase :
            # NOTE(review): originally ``my_attr = "bar"`` on a class named
            # ``Foo`` — the renaming pass broke the references below.
            lowerCamelCase_ : str = '''bar'''

        snake_case_ : List[Any] = Foo()
        self.assertEqual(foo.my_attr , '''bar''' )
        with temporary_assignment(__magic_name__ , '''my_attr''' , '''BAR''' ):
            self.assertEqual(foo.my_attr , '''BAR''' )
        self.assertEqual(foo.my_attr , '''bar''' )
@pytest.mark.parametrize(
    '''iterable_length, num_proc, expected_num_proc''' , [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ] , )
def lowerCamelCase_ ( iterable_length , num_proc , expected_num_proc ):
    """Check how ``map_nested`` picks its worker count.

    With both transform paths mocked out, verify that small inputs stay on
    the single-process path while inputs of at least ``parallel_min_length``
    spawn ``min(len(iterable), num_proc)`` workers.

    Fixes the mangled original's three identical parameter names (a
    SyntaxError, and a mismatch with the parametrize argument names) and the
    lambda's undefined free variable ``x``.
    """
    with patch('''datasets.utils.py_utils._single_map_nested''' ) as mock_single_map_nested, patch(
        '''datasets.parallel.parallel.Pool''' ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class __lowerCAmelCase ( _a ):
    """Tests for ``temp_seed``: the same seed inside the context manager must
    reproduce RNG outputs (TensorFlow / PyTorch / NumPy), while a fresh
    unseeded call must differ.

    NOTE(review): the renaming pass mangled the locals (``model``, ``outa``,
    ``outb``, ``outc``) and gave all three methods the same name
    ``lowerCamelCase`` (later defs shadow earlier ones) — confirm against the
    original upstream tests.
    """

    @require_tf
    def lowerCamelCase (self ) -> None:
        '''Same TF seed -> identical outputs; fresh RNG state -> different.'''
        import tensorflow as tf
        from tensorflow.keras import layers

        snake_case_ : str = layers.Dense(2 )

        def gen_random_output():
            # one Dense forward pass over freshly drawn random input
            snake_case_ : str = tf.random.uniform((1, 3) )
            return model(__magic_name__ ).numpy()

        with temp_seed(42 , set_tensorflow=__magic_name__ ):
            snake_case_ : int = gen_random_output()
        with temp_seed(42 , set_tensorflow=__magic_name__ ):
            snake_case_ : Optional[int] = gen_random_output()
        snake_case_ : Any = gen_random_output()

        np.testing.assert_equal(__magic_name__ , __magic_name__ )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )

    @require_torch
    def lowerCamelCase (self ) -> None:
        '''Same torch seed -> identical outputs; fresh RNG state -> different.'''
        import torch

        def gen_random_output():
            # model init and input are both random, so fully seed-dependent
            snake_case_ : Dict = torch.nn.Linear(3 , 2 )
            snake_case_ : List[Any] = torch.rand(1 , 3 )
            return model(__magic_name__ ).detach().numpy()

        with temp_seed(42 , set_pytorch=__magic_name__ ):
            snake_case_ : Optional[Any] = gen_random_output()
        with temp_seed(42 , set_pytorch=__magic_name__ ):
            snake_case_ : str = gen_random_output()
        snake_case_ : str = gen_random_output()

        np.testing.assert_equal(__magic_name__ , __magic_name__ )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )

    def lowerCamelCase (self ) -> None:
        '''Same NumPy seed -> identical outputs; fresh RNG state -> different.'''

        def gen_random_output():
            return np.random.rand(1 , 3 )

        with temp_seed(42 ):
            snake_case_ : Optional[int] = gen_random_output()
        with temp_seed(42 ):
            snake_case_ : List[Any] = gen_random_output()
        snake_case_ : Any = gen_random_output()

        np.testing.assert_equal(__magic_name__ , __magic_name__ )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize('''input_data''' , [{}] )
def lowerCamelCase_ ( input_data ):
    """``NestedDataStructure`` exposes the wrapped object unchanged via ``.data``.

    Fixes the mangled original, whose body compared two names
    (``output_data`` / ``input_data``) that no longer existed and whose
    parameter name no longer matched the parametrize argument.
    """
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
    '''data, expected_output''' , [
        ({}, []),
        ([], []),
        ('''foo''', ['''foo''']),
        (['''foo''', '''bar'''], ['''foo''', '''bar''']),
        ([['''foo''', '''bar''']], ['''foo''', '''bar''']),
        ([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
        ([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
        ({'''a''': 1, '''b''': 2}, [1, 2]),
        ({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
        ({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
        ({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
        ({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
        ({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
        ({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
        ({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
        ({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
        ({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
        ({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
    ] , )
def lowerCamelCase_ ( data , expected_output ):
    """``NestedDataStructure.flatten`` yields leaves of arbitrarily nested
    lists/dicts in order.

    Fixes the mangled original's two identical parameter names (a
    SyntaxError, and a mismatch with the parametrize argument names).
    """
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def lowerCamelCase_ ( ):
    """``asdict`` converts (nested) dataclass instances into plain dicts and
    rejects a non-dataclass top-level input with ``TypeError``.

    Fixes the mangled original, in which every ``asdict`` argument, the
    expected outputs and the expected exception type had been replaced by
    undefined placeholder names.
    """
    # flat dataclass -> flat dict
    input = A(x=1, y='''foobar''')
    expected_output = {'''x''': 1, '''y''': '''foobar'''}
    assert asdict(input) == expected_output

    # dataclasses nested inside dicts/lists are converted recursively
    input = {'''a''': {'''b''': A(x=10, y='''foo''')}, '''c''': [A(x=20, y='''bar''')]}
    expected_output = {'''a''': {'''b''': {'''x''': 10, '''y''': '''foo'''}}, '''c''': [{'''x''': 20, '''y''': '''bar'''}]}
    assert asdict(input) == expected_output

    # a list is not a dataclass instance
    with pytest.raises(TypeError):
        asdict([1, A(x=10, y='''foo''')])
def lowerCamelCase_ ( text ):
    """Split *text* on whitespace.

    Module-level so it pickles; used as an ``iflatmap_unordered`` callback
    invoked with ``kwargs_iterable=[{"text": ...}]`` — hence the parameter
    must be named ``text`` (the mangled original renamed the parameter but
    left ``text`` in the body, an unconditional NameError).
    """
    return text.split()
def lowerCamelCase_ ( content ):
    """Yield ``(timestamp, content)`` twice, two seconds apart.

    Used to verify that ``iflatmap_unordered`` surfaces items as soon as
    they are produced rather than after the generator completes.  The
    mangled original renamed the parameter but kept ``content`` in the body
    (a NameError) and carried a bogus ``-> int`` annotation — both fixed.
    """
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def lowerCamelCase_ ( ):
    """End-to-end checks for ``iflatmap_unordered``.

    Verifies flattened results with both ``multiprocessing.Pool`` and the
    dill-based ``multiprocess.Pool``, then checks that items are yielded as
    soon as each worker produces them (within a 0.1s tolerance).

    Fixes the mangled original, in which the pool handle and the result
    accumulator had been replaced by undefined placeholder names.
    """
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{'''text''': '''hello there'''}] * 10))
        assert out.count('''hello''') == 10
        assert out.count('''there''') == 10
        assert len(out) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{'''text''': '''hello there'''}] * 10))
        assert out.count('''hello''') == 10
        assert out.count('''there''') == 10
        assert len(out) == 20
    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}]
        ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(content)
        assert out.count('''a''') == 2
        assert out.count('''b''') == 2
        assert len(out) == 4
| 279 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    # Build an UnCLIP image-variation pipeline by reusing the components of a
    # pretrained txt2img UnCLIP pipeline plus a CLIP image encoder, then save
    # it to --dump_path.
    #
    # Fixes the mangled original: every intermediate pipeline object had been
    # assigned to the same throwaway name, and the parsed argument was read
    # back via the nonexistent attribute ``args.txtaimg_unclip`` (argparse
    # stores ``--txt2img_unclip`` as ``args.txt2img_unclip``).
    parser = argparse.ArgumentParser()

    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    parser.add_argument(
        '''--txt2img_unclip''',
        default='''kakaobrain/karlo-v1-alpha''',
        type=str,
        required=False,
        help='''The pretrained txt2img unclip.''',
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 279 | 1 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import wiring for the CPM-Ant sub-package (standard transformers
# pattern).  Fixes the mangled original: the import structure and the
# torch-only model list were assigned to throwaway names (leaving
# ``_import_structure`` undefined at its point of use, and the model list
# never attached), and the ``_LazyModule`` was bound to a local instead of
# replacing the module in ``sys.modules``.
_import_structure = {
    '''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
    '''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the config and tokenizer modules
    pass
else:
    _import_structure['''modeling_cpmant'''] = [
        '''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''CpmAntForCausalLM''',
        '''CpmAntModel''',
        '''CpmAntPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type-checkers see the real imports.
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazily-loading proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 279 |
from math import factorial
# Pre-computed factorials of the decimal digits 0-9 (``range(1_0)`` is
# 0..9), keyed by the digit's string form for direct lookup while scanning
# ``str(number)``.  (Originally named ``DIGIT_FACTORIAL``.)
lowerCAmelCase_ = {str(digit): factorial(digit) for digit in range(1_0)}
def lowerCamelCase_ ( _UpperCamelCase ) -> int:
    """Return the sum of the factorials of the decimal digits of the number.

    Raises:
        TypeError: if the argument is not an int.
        ValueError: if the argument is negative.

    Fixes the mangled original, whose type guard was
    ``isinstance(x, x)`` (always a TypeError, regardless of input) and whose
    range guard and table lookup referenced undefined names.
    """
    if not isinstance(_UpperCamelCase, int):
        raise TypeError('''Parameter number must be int''')
    if _UpperCamelCase < 0:
        raise ValueError('''Parameter number must be greater than or equal to 0''')
    # Convert the number to a string to iterate over its digits, summing the
    # factorial of each (equivalent to the original DIGIT_FACTORIAL lookup).
    return sum(factorial(int(digit)) for digit in str(_UpperCamelCase))
def lowerCamelCase_ ( chain_length = 60 , number_limit = 1_000_000 ) -> int:
    """Count digit-factorial chains of exactly *chain_length* terms.

    For every start element below *number_limit*, iterate
    ``n -> sum(factorial of digits of n)`` until a repeat, counting the
    non-repeating terms (with memoization of previously computed chain
    sizes), and return how many start elements produce a chain of exactly
    *chain_length* terms (Project Euler problem 74).

    Raises:
        TypeError: if either parameter is not an int.
        ValueError: if either parameter is not positive.

    Fixes the mangled original's duplicated parameter names (a SyntaxError)
    and its call to the undefined ``digit_factorial_sum`` (inlined below).
    """
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError('''Parameters chain_length and number_limit must be int''')
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            '''Parameters chain_length and number_limit must be greater than 0''')

    def _digit_factorial_sum(number):
        # sum of the factorials of the decimal digits of *number*
        return sum(factorial(int(digit)) for digit in str(number))

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previously computed chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating
        # item, or the length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = _digit_factorial_sum(chain_element)

        # Extend by the cached tail length when we stopped on a cached element.
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # ``solution`` was renamed to ``lowerCamelCase_`` by the renaming pass,
    # so call the last-defined ``lowerCamelCase_`` (the chain counter).
    print(F'''{lowerCamelCase_()}''')
| 279 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
# NOTE(review): this second assignment reuses the scrambled name
# ``lowerCAmelCase_`` and therefore discards the logger created just above —
# originally these were two distinct names (a logger and a pretrained-config
# archive map).
lowerCAmelCase_ = {
    '''caidas/swin2sr-classicalsr-x2-64''': (
        '''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
    ),
}
class __lowerCAmelCase ( _a ):
    """Configuration for a Swin2SR-style super-resolution model.

    Restored from the scrambled original: the ``__init__`` signature repeated
    one parameter name (a SyntaxError as written) and every value was bound to
    a dead local instead of ``self``; the parameter/attribute names below are
    recovered from the right-hand sides of those assignments. ``_a`` is the
    imported ``PretrainedConfig``-style base class.
    """

    model_type = '''swin2sr'''
    # Map the generic config attribute names onto this model's native names.
    attribute_map = {
        '''hidden_size''': '''embed_dim''',
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],  # mutable default kept for interface fidelity
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act='''gelu''',
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection='''1conv''',
        upsampler='''pixelshuffle''',
        **kwargs,
    ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        # Derived: one "layer" per entry in ``depths``.
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
| 279 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class __lowerCAmelCase ( _a ):
    '''Single-compressed-file archive filesystem (fsspec-style).

    NOTE(review): this class is heavily scrambled — the four class attributes
    all share the name ``lowerCamelCase_`` (only the last assignment survives;
    per the inline comments they were ``root_marker``-like, ``protocol``,
    ``compression`` and ``extension``), and the four methods all share the name
    ``lowerCamelCase`` (only the last survives; originally
    ``_strip_protocol`` / ``_get_dirs`` / ``cat`` / ``_open``).
    '''

    lowerCamelCase_ : int = ''''''
    lowerCamelCase_ : str = (
        None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    lowerCamelCase_ : str = None # compression type in fsspec. ex: "gzip"
    lowerCamelCase_ : str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__(self , __magic_name__ = "" , __magic_name__ = None , __magic_name__ = None , **__magic_name__ ) -> Any:
        '''Open the single compressed file backing this filesystem.

        NOTE(review): the parameters all carry the scrambled name
        ``__magic_name__`` (a SyntaxError as written); originally ``fo``,
        ``target_protocol``, ``target_options`` and ``**kwargs`` — which is why
        the body reads the otherwise-unbound name ``target_options``.
        '''
        super().__init__(self , **__magic_name__ )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        snake_case_ : Union[str, Any] = fsspec.open(
            __magic_name__ , mode='''rb''' , protocol=__magic_name__ , compression=self.compression , client_kwargs={
                '''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
                '''trust_env''': True, # Enable reading proxy env variables.
                **(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        # NOTE(review): the three bindings below land on the dead local
        # ``snake_case_`` rather than ``self.compressed_name`` /
        # ``self.uncompressed_name`` / ``self.dir_cache`` that the methods
        # below read — another scrambling artifact.
        snake_case_ : Tuple = os.path.basename(self.file.path.split('''::''' )[0] )
        snake_case_ : Optional[Any] = (
            self.compressed_name[: self.compressed_name.rindex('''.''' )]
            if '''.''' in self.compressed_name
            else self.compressed_name
        )
        snake_case_ : Dict = None

    @classmethod
    def lowerCamelCase (cls , __magic_name__ ) -> Optional[int]:
        '''Strip the protocol prefix and any leading "/" from a path.'''
        return super()._strip_protocol(__magic_name__ ).lstrip('''/''' )

    def lowerCamelCase (self ) -> Union[str, Any]:
        '''Lazily build the one-entry directory cache for the member file.'''
        if self.dir_cache is None:
            snake_case_ : Optional[int] = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
            snake_case_ : List[str] = {f['''name''']: f}

    def lowerCamelCase (self , __magic_name__ ) -> Optional[Any]:
        '''Return the whole decompressed payload as bytes.'''
        return self.file.open().read()

    def lowerCamelCase (self , __magic_name__ , __magic_name__ = "rb" , __magic_name__=None , __magic_name__=True , __magic_name__=None , **__magic_name__ , ) -> int:
        '''Open the decompressed stream; only binary read mode is supported.

        NOTE(review): duplicate scrambled parameter names again — originally
        ``path``, ``mode``, ``block_size``, ``autocommit``, ``cache_options``.
        '''
        snake_case_ : Union[str, Any] = self._strip_protocol(__magic_name__ )
        if mode != "rb":
            raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
        return self.file.open()
class __lowerCAmelCase ( _a ):
    # Bzip2 variant of the compressed-file filesystem.
    # NOTE(review): the three attributes share one scrambled name, so only the
    # last assignment survives; originally ``protocol`` / ``compression`` /
    # ``extension``.
    lowerCamelCase_ : Union[str, Any] = '''bz2'''
    lowerCamelCase_ : Any = '''bz2'''
    lowerCamelCase_ : int = '''.bz2'''
class __lowerCAmelCase ( _a ):
    # Gzip variant of the compressed-file filesystem.
    # NOTE(review): the three attributes share one scrambled name, so only the
    # last assignment survives; originally ``protocol`` / ``compression`` /
    # ``extension``.
    lowerCamelCase_ : Union[str, Any] = '''gzip'''
    lowerCamelCase_ : Dict = '''gzip'''
    lowerCamelCase_ : int = '''.gz'''
class __lowerCAmelCase ( _a ):
    # LZ4 variant of the compressed-file filesystem.
    # NOTE(review): the three attributes share one scrambled name, so only the
    # last assignment survives; originally ``protocol`` / ``compression`` /
    # ``extension``.
    lowerCamelCase_ : Any = '''lz4'''
    lowerCamelCase_ : Any = '''lz4'''
    lowerCamelCase_ : Optional[Any] = '''.lz4'''
class __lowerCAmelCase ( _a ):
    # XZ variant of the compressed-file filesystem.
    # NOTE(review): the three attributes share one scrambled name, so only the
    # last assignment survives; originally ``protocol`` / ``compression`` /
    # ``extension``.
    lowerCamelCase_ : Tuple = '''xz'''
    lowerCamelCase_ : Any = '''xz'''
    lowerCamelCase_ : int = '''.xz'''
class __lowerCAmelCase ( _a ):
    '''Zstandard variant of the compressed-file filesystem.

    NOTE(review): the three class attributes share one scrambled name (only
    the last survives; originally ``protocol`` / ``compression`` /
    ``extension``).
    '''

    lowerCamelCase_ : Union[str, Any] = '''zstd'''
    lowerCamelCase_ : Tuple = '''zstd'''
    lowerCamelCase_ : Any = '''.zst'''

    def __init__(self , __magic_name__ , __magic_name__ = "rb" , __magic_name__ = None , __magic_name__ = None , __magic_name__ = DEFAULT_BLOCK_SIZE , **__magic_name__ , ) -> Tuple:
        '''Open the zstd stream and patch ``__enter__`` to wrap the reader.

        NOTE(review): the five positional parameters all carry the scrambled
        name ``__magic_name__`` (a SyntaxError as written); originally
        ``fo``, ``mode``, ``target_protocol``, ``target_options``,
        ``block_size`` plus ``**kwargs``.
        '''
        super().__init__(
            fo=__magic_name__ , mode=__magic_name__ , target_protocol=__magic_name__ , target_options=__magic_name__ , block_size=__magic_name__ , **__magic_name__ , )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        # out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        # NOTE(review): originally bound as ``_enter = self.file.__enter__``;
        # here it lands on a dead local.
        snake_case_ : Dict = self.file.__enter__

        class __lowerCAmelCase :
            '''Proxy that forwards everything to the wrapped file object while
            providing a writable context-manager surface.'''

            def __init__(self , __magic_name__ ) -> List[Any]:
                # NOTE(review): parameter was originally ``file_``; as written
                # the body reads the unbound name ``file_`` and binds a dead
                # local instead of ``self._file``.
                snake_case_ : str = file_

            def __enter__(self ) -> List[Any]:
                '''Enter the underlying file's context and return the proxy.'''
                self._file.__enter__()
                return self

            def __exit__(self , *__magic_name__ , **__magic_name__ ) -> int:
                '''Delegate context exit to the underlying file.'''
                self._file.__exit__(*__magic_name__ , **__magic_name__ )

            def __iter__(self ) -> Optional[int]:
                return iter(self._file )

            def lowerCamelCase (self ) -> Union[str, Any]:
                '''``__next__`` under its scrambled name.'''
                return next(self._file )

            def __getattr__(self , __magic_name__ ) -> str:
                return getattr(self._file , __magic_name__ )

        def fixed_enter(*__magic_name__ , **__magic_name__ ):
            # NOTE(review): ``WrappedFile`` and ``_enter`` are the original
            # (pre-scramble) names of the class and binding above.
            return WrappedFile(_enter(*__magic_name__ , **__magic_name__ ) )

        # NOTE(review): originally ``self.file.__enter__ = fixed_enter``.
        snake_case_ : Tuple = fixed_enter
| 279 | 1 |
def lowerCamelCase_ ( _UpperCamelCase ) -> int:
    """Return the largest number obtainable by deleting exactly one decimal
    digit of ``abs(_UpperCamelCase)``.

    Raises:
        TypeError: if the argument is not an ``int``.
    """
    # Bug fixes vs. the scrambled original: ``isinstance(x, x)`` always raised
    # TypeError; ``list(<int>)`` / ``pop(<the int>)`` / joining the raw int all
    # operated on the integer argument instead of its digit string.
    if not isinstance(_UpperCamelCase , int ):
        raise TypeError('''only integers accepted as input''' )
    digits = str(abs(_UpperCamelCase ) )
    # One candidate per digit position, each with that position removed.
    candidates = [list(digits ) for _ in range(len(digits ) )]
    for index in range(len(digits ) ):
        candidates[index].pop(index )
    return max(int(''''''.join(candidate ) ) for candidate in candidates )
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    __import__('''doctest''').testmod()
| 279 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
# NOTE(review): this rebinds the same scrambled name and discards the logger
# created just above — originally a logger and an (empty) pretrained-config
# archive map were two distinct names.
lowerCAmelCase_ = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class __lowerCAmelCase ( _a ):
    """MegatronBERT model configuration (BERT-style hyper-parameters).

    Restored from the scrambled original: the ``__init__`` signature repeated
    one parameter name (a SyntaxError as written) and every value was bound to
    a dead local instead of ``self``; the names below are recovered from the
    right-hand sides of those assignments. ``_a`` is the imported
    ``PretrainedConfig``-style base class.
    """

    model_type = '''megatron-bert'''

    def __init__(
        self,
        vocab_size=2_9056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act='''gelu''',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type='''absolute''',
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 279 | 1 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( _a, _a, unittest.TestCase ):
    '''Pipeline tests for ``IFInpaintingSuperResolutionPipeline``.

    NOTE(review): scrambling artifacts — the base list repeats one name
    (``_a``; originally two distinct tester mixins), the four class attributes
    all share ``lowerCamelCase_`` (only the last assignment survives;
    originally ``pipeline_class`` / ``params`` / ``batch_params`` /
    ``required_optional_params``), and every method shares the name
    ``lowerCamelCase`` (only the last survives).
    '''

    lowerCamelCase_ : Any = IFInpaintingSuperResolutionPipeline
    lowerCamelCase_ : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
    lowerCamelCase_ : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
    lowerCamelCase_ : Any = PipelineTesterMixin.required_optional_params - {'''latents'''}

    def lowerCamelCase (self ) -> int:
        '''Delegate component construction to the IF mixin helper.'''
        return self._get_superresolution_dummy_components()

    def lowerCamelCase (self , __magic_name__ , __magic_name__=0 ) -> List[str]:
        '''Build deterministic dummy pipeline inputs.

        NOTE(review): the two parameters share the scrambled name
        ``__magic_name__`` (a SyntaxError as written); originally ``device``
        and ``seed``. The locals below (``image`` / ``original_image`` /
        ``mask_image`` / ``generator`` / ``inputs``) are likewise bound to a
        dead ``snake_case_`` while being read by their original names.
        '''
        if str(__magic_name__ ).startswith('''mps''' ):
            snake_case_ : Dict = torch.manual_seed(__magic_name__ )
        else:
            snake_case_ : List[str] = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
        snake_case_ : Union[str, Any] = floats_tensor((1, 3, 16, 16) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
        snake_case_ : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
        snake_case_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
        snake_case_ : Optional[Any] = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''original_image''': original_image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def lowerCamelCase (self ) -> List[str]:
        '''xFormers attention parity check.'''
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )

    def lowerCamelCase (self ) -> List[Any]:
        '''Optional-component save/load round-trip.'''
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
    def lowerCamelCase (self ) -> Optional[Any]:
        '''float16 save/load round-trip.'''
        super().test_save_load_floataa(expected_max_diff=1e-1 )

    def lowerCamelCase (self ) -> Optional[Any]:
        '''Attention-slicing parity check.'''
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )

    def lowerCamelCase (self ) -> Dict:
        '''Local save/load round-trip.'''
        self._test_save_load_local()

    def lowerCamelCase (self ) -> Union[str, Any]:
        '''Batched vs. single-sample inference parity.'''
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2 , )
| 279 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
# Module-level RNG shared by the fixture helpers below.
global_rng = random.Random()
lowerCAmelCase_ = global_rng  # keep the scrambled alias bound for compatibility


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a ``shape[0] x shape[1]`` nested list of random floats in [0, scale).

    Args:
        shape: 2-tuple ``(rows, cols)``.
        scale: multiplier applied to each random sample.
        rng: optional ``random.Random``; defaults to the module-level RNG.
        name: unused; kept for signature compatibility with upstream callers.
    """
    if rng is None:
        # Bug fix: the scrambled original assigned the fallback RNG to a dead
        # local, leaving ``rng`` as None and crashing on ``rng.random()``.
        # (It also repeated one parameter name — a SyntaxError as written.)
        rng = global_rng
    values = []
    for _ in range(shape[0]):
        values.append([rng.random() * scale for _ in range(shape[1])])
    return values


lowerCamelCase_ = floats_list  # keep the scrambled alias bound for compatibility
@require_torch
@require_torchaudio
class __lowerCAmelCase ( unittest.TestCase ):
    """Fixture helper building WhisperFeatureExtractor kwargs and dummy raw audio.

    Restored from the scrambled original, whose ``__init__`` repeated one
    parameter name (a SyntaxError as written) and bound every value to a dead
    local instead of ``self``; method names are restored to the ones the test
    class below actually calls.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Length step between consecutive samples when building a ragged batch.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        """Kwargs for constructing a WhisperFeatureExtractor."""
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Build a batch of float lists: equal-length or increasing lengths."""

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


WhisperFeatureExtractionTester = __lowerCAmelCase  # descriptive alias read elsewhere in the file
@require_torch
@require_torchaudio
class __lowerCAmelCase ( _a, unittest.TestCase ):
    '''Unit tests for WhisperFeatureExtractor.

    NOTE(review): scrambling artifacts — ``_a`` was originally the sequence
    feature-extraction mixin; every method shares the name ``lowerCamelCase``
    (only the last survives), and the locals bound to ``snake_case_`` are read
    back under their original names (``feat_extract_first``,
    ``feature_extractor``, ``speech_inputs``, …), which are unbound as
    written. ``WhisperFeatureExtractionTester`` is the original name of the
    helper class defined above.
    '''

    lowerCamelCase_ : Optional[Any] = WhisperFeatureExtractor if is_speech_available() else None

    def lowerCamelCase (self ) -> Optional[int]:
        '''setUp: build the fixture helper (originally ``self.feat_extract_tester``).'''
        snake_case_ : List[str] = WhisperFeatureExtractionTester(self )

    def lowerCamelCase (self ) -> List[str]:
        '''save_pretrained/from_pretrained round-trip preserves config and mel filters.'''
        snake_case_ : str = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case_ : Union[str, Any] = feat_extract_first.save_pretrained(__magic_name__ )[0]
            check_json_file_has_correct_format(__magic_name__ )
            snake_case_ : List[Any] = self.feature_extraction_class.from_pretrained(__magic_name__ )
        snake_case_ : Optional[int] = feat_extract_first.to_dict()
        snake_case_ : Dict = feat_extract_second.to_dict()
        snake_case_ : List[str] = feat_extract_first.mel_filters
        snake_case_ : Union[str, Any] = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(__magic_name__ , __magic_name__ ) )
        self.assertEqual(__magic_name__ , __magic_name__ )

    def lowerCamelCase (self ) -> Optional[Any]:
        '''to_json_file/from_json_file round-trip preserves config and mel filters.'''
        snake_case_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case_ : List[Any] = os.path.join(__magic_name__ , '''feat_extract.json''' )
            feat_extract_first.to_json_file(__magic_name__ )
            snake_case_ : Optional[int] = self.feature_extraction_class.from_json_file(__magic_name__ )
        snake_case_ : int = feat_extract_first.to_dict()
        snake_case_ : Optional[int] = feat_extract_second.to_dict()
        snake_case_ : Union[str, Any] = feat_extract_first.mel_filters
        snake_case_ : str = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(__magic_name__ , __magic_name__ ) )
        self.assertEqual(__magic_name__ , __magic_name__ )

    def lowerCamelCase (self ) -> int:
        '''Exercise __call__: feature shape, non-batched/batched parity, truncation.'''
        snake_case_ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        snake_case_ : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        snake_case_ : str = [np.asarray(__magic_name__ ) for speech_input in speech_inputs]
        # Test feature size
        snake_case_ : str = feature_extractor(__magic_name__ , padding='''max_length''' , return_tensors='''np''' ).input_features
        self.assertTrue(input_features.ndim == 3 )
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
        # Test not batched input
        snake_case_ : Dict = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
        snake_case_ : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
        self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1e-3 ) )
        # Test batched
        snake_case_ : int = feature_extractor(__magic_name__ , return_tensors='''np''' ).input_features
        snake_case_ : Union[str, Any] = feature_extractor(__magic_name__ , return_tensors='''np''' ).input_features
        for enc_seq_a, enc_seq_a in zip(__magic_name__ , __magic_name__ ):
            self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        snake_case_ : Union[str, Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        snake_case_ : List[str] = np.asarray(__magic_name__ )
        snake_case_ : List[Any] = feature_extractor(__magic_name__ , return_tensors='''np''' ).input_features
        snake_case_ : Dict = feature_extractor(__magic_name__ , return_tensors='''np''' ).input_features
        for enc_seq_a, enc_seq_a in zip(__magic_name__ , __magic_name__ ):
            self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1e-3 ) )
        # Test truncation required
        snake_case_ : Any = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
        snake_case_ : Union[str, Any] = [np.asarray(__magic_name__ ) for speech_input in speech_inputs]
        snake_case_ : Tuple = [x[: feature_extractor.n_samples] for x in speech_inputs]
        snake_case_ : Optional[Any] = [np.asarray(__magic_name__ ) for speech_input in speech_inputs_truncated]
        snake_case_ : Any = feature_extractor(__magic_name__ , return_tensors='''np''' ).input_features
        snake_case_ : List[Any] = feature_extractor(__magic_name__ , return_tensors='''np''' ).input_features
        for enc_seq_a, enc_seq_a in zip(__magic_name__ , __magic_name__ ):
            self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1e-3 ) )

    def lowerCamelCase (self ) -> int:
        '''Padding casts float64 inputs to float32 for both np and pt tensors.'''
        import torch
        snake_case_ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        snake_case_ : Union[str, Any] = np.random.rand(100 , 32 ).astype(np.floataa )
        snake_case_ : Dict = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            snake_case_ : Optional[Any] = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_features.dtype == np.floataa )
            snake_case_ : Optional[Any] = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_features.dtype == torch.floataa )

    def lowerCamelCase (self , __magic_name__ ) -> Dict:
        '''Load the first ``num_samples`` dummy LibriSpeech waveforms.'''
        snake_case_ : Optional[Any] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        snake_case_ : Optional[Any] = ds.sort('''id''' ).select(range(__magic_name__ ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]

    def lowerCamelCase (self ) -> str:
        '''Integration: first 30 log-mel values of sample 0 match the reference tensor.'''
        snake_case_ : str = torch.tensor(
            [
                0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951,
                0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678,
                0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554,
                -0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854
            ] )
        # fmt: on
        snake_case_ : List[Any] = self._load_datasamples(1 )
        snake_case_ : Union[str, Any] = WhisperFeatureExtractor()
        snake_case_ : Union[str, Any] = feature_extractor(__magic_name__ , return_tensors='''pt''' ).input_features
        self.assertEqual(input_features.shape , (1, 80, 3000) )
        self.assertTrue(torch.allclose(input_features[0, 0, :30] , __magic_name__ , atol=1e-4 ) )

    def lowerCamelCase (self ) -> Union[str, Any]:
        '''zero_mean_unit_var_norm yields ~0 mean and ~1 variance.'''
        snake_case_ : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        snake_case_ : Optional[int] = self._load_datasamples(1 )[0]
        snake_case_ : List[str] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
        snake_case_ : Optional[Any] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__magic_name__ )[0]
        self.assertTrue(np.all(np.mean(__magic_name__ ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(__magic_name__ ) - 1 ) < 1e-3 ) )
| 279 | 1 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def lowerCamelCase_ ( ):
    """Entry point: parse ``TensorFlowBenchmarkArguments`` from argv and run the benchmark.

    Restored from the scrambled original, which read several unbound names
    (``parser``, ``benchmark``, the error-message locals) because every
    binding landed on a dead local, and which created the benchmark before
    the try/except it belonged with.
    """
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args )
    try:
        parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        # Translate the argparse failure for deprecated ``--no_*`` flags into a
        # friendlier message suggesting the ``--no-*`` spelling.
        arg_error_msg = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
        begin_error_msg = ''' '''.join(str(e ).split(''' ''' )[:-1] )
        full_error_msg = ''''''
        # NOTE(review): ``eval`` on the exception text — safe only because the
        # text originates from argparse itself, not external input.
        depreciated_args = eval(str(e ).split(''' ''' )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
        raise ValueError(full_error_msg )
    benchmark.run()


main = lowerCamelCase_  # name used by the __main__ guard below
if __name__ == "__main__":
    # NOTE(review): ``main`` is not bound under that name in this scrambled
    # module — the function above was renamed ``lowerCamelCase_``; restore or
    # alias it before running this entry point.
    main()
| 279 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCAmelCase_ = logging.getLogger(__name__)
def lowerCamelCase_ ( argv=None ):
    """Build and parse the CLI arguments for TFRecord shard preparation.

    Restored from the scrambled original, where every ``type=`` keyword and
    ``default=None`` pointed at the unbound name ``_UpperCamelCase`` instead
    of ``str`` / ``int`` / ``None``.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``
            (new optional parameter — backward compatible, aids testing).

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(
        description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' )
    parser.add_argument(
        '''--dataset_name''' , type=str , default='''wikitext''' , help='''Name of the training. Explore datasets at: hf.co/datasets.''' , )
    parser.add_argument(
        '''--dataset_config''' , type=str , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' )
    parser.add_argument(
        '''--tokenizer_name_or_path''' , type=str , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , )
    parser.add_argument(
        '''--shard_size''' , type=int , default=1_000 , help='''Number of entries to go in a single shard.''' , )
    parser.add_argument('''--split''' , type=str , default='''train''' , choices=['''train''', '''test''', '''validation'''] )
    parser.add_argument(
        '''--limit''' , default=None , type=int , help='''Limit the number of shards (used for debugging).''' , )
    parser.add_argument(
        '''--max_length''' , type=int , default=512 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum'''
        ''' sequence length that is a multiple of 8.''' , )
    parser.add_argument(
        '''--output_dir''' , default='''tf-tpu''' , type=str , help='''Output directory where the TFRecord shards will be saved. If the'''
        ''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'''
        ''' shards will be directly saved to a Google Cloud Storage bucket.''' , )
    args = parser.parse_args(argv )
    return args


parse_args = lowerCamelCase_  # name used by the __main__ guard below
def lowerCamelCase_ ( tokenizer ):
    """Build a ``datasets.map``-compatible closure that tokenizes the ``text`` column.

    Bug fix: in the scrambled original the outer parameter shadowed the inner
    one (both ``_UpperCamelCase``) while the body read the unbound names
    ``tokenizer`` and ``examples``; the intended names are restored here.

    Args:
        tokenizer: any callable mapping a list of strings to tokenized output.

    Returns:
        A function of one batch dict that tokenizes its ``'text'`` entries.
    """
    def fn(examples ):
        return tokenizer(examples['''text'''] )

    return fn
def lowerCamelCase_ ( _UpperCamelCase ) -> Union[str, Any]:
    """Serialize tokenized examples into a list of TFRecord-ready byte strings.

    NOTE(review): scrambled — the parameter was originally ``tokenized_data``
    (read below unbound), the ``snake_case_`` bindings were distinct locals
    (``records`` / ``features`` / ``message`` / ``example`` / ``serialized``),
    and the final ``append`` should add the serialized example, not the raw
    argument.
    """
    snake_case_ : Any = []
    for i in range(len(tokenized_data['''input_ids'''] ) ):
        snake_case_ : Any = {
            '''input_ids''': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['''input_ids'''][i] ) ),
            '''attention_mask''': tf.train.Feature(
                intaa_list=tf.train.IntaaList(value=tokenized_data['''attention_mask'''][i] ) ),
        }
        snake_case_ : Optional[int] = tf.train.Features(feature=_UpperCamelCase )
        snake_case_ : Optional[Any] = tf.train.Example(features=_UpperCamelCase )
        snake_case_ : Optional[Any] = example.SerializeToString()
        records.append(_UpperCamelCase )
    return records
def lowerCamelCase_ ( _UpperCamelCase ) -> Optional[int]:
    """Main driver: load, tokenize, group and shard a dataset into TFRecords.

    NOTE(review): scrambled — the parameter was originally ``args`` (the body
    reads the unbound name ``args`` throughout), and each ``snake_case_``
    binding was a distinct local (``dataset`` / ``tokenizer`` / ``split_dir``
    / ``grouped_dataset`` / shard bookkeeping values) that later lines read
    back under the original names.
    """
    snake_case_ : int = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        snake_case_ : Union[str, Any] = min(len(_UpperCamelCase ) , args.limit )
        snake_case_ : int = dataset.select(range(_UpperCamelCase ) )
        print(f'''Limiting the dataset to {args.limit} entries.''' )
    snake_case_ : Dict = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        snake_case_ : str = os.path.join(args.output_dir , args.split )
        if not os.path.exists(_UpperCamelCase ):
            os.makedirs(_UpperCamelCase )
    else:
        snake_case_ : Optional[Any] = os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    snake_case_ : Optional[Any] = tokenize_function(_UpperCamelCase )
    snake_case_ : List[Any] = dataset.map(_UpperCamelCase , batched=_UpperCamelCase , num_proc=4 , remove_columns=['''text'''] )
    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(_UpperCamelCase ):
        # Concatenate all texts.
        snake_case_ : Tuple = {k: sum(examples[k] , [] ) for k in examples.keys()}
        snake_case_ : List[str] = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        snake_case_ : int = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        snake_case_ : Union[str, Any] = {
            k: [t[i : i + args.max_length] for i in range(0 , _UpperCamelCase , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result
    snake_case_ : int = dataset_tokenized.map(_UpperCamelCase , batched=_UpperCamelCase , batch_size=1_000 , num_proc=4 )
    snake_case_ : str = 0
    snake_case_ : Optional[Any] = 0
    for shard in range(0 , len(_UpperCamelCase ) , args.shard_size ):
        snake_case_ : Any = grouped_dataset[shard : shard + args.shard_size]
        snake_case_ : str = len(dataset_snapshot['''input_ids'''] )
        snake_case_ : Union[str, Any] = os.path.join(_UpperCamelCase , f'''dataset-{shard_count}-{records_containing}.tfrecord''' )
        snake_case_ : Dict = get_serialized_examples(_UpperCamelCase )
        with tf.io.TFRecordWriter(_UpperCamelCase ) as out_file:
            for i in range(len(_UpperCamelCase ) ):
                snake_case_ : List[str] = serialized_examples[i]
                out_file.write(_UpperCamelCase )
            print('''Wrote file {} containing {} records'''.format(_UpperCamelCase , _UpperCamelCase ) )
        shard_count += 1
        total_records += records_containing
    with open(f'''split-{args.split}-records-count.txt''' , '''w''' ) as f:
        print(f'''Total {args.split} records: {total_records}''' , file=_UpperCamelCase )
if __name__ == "__main__":
    # NOTE(review): scrambled entry point — ``parse_args``/``main`` are the
    # original names of the functions above (renamed ``lowerCamelCase_``), and
    # the parsed namespace is bound to ``lowerCAmelCase_`` while ``main`` is
    # called with the unbound name ``args``.
    lowerCAmelCase_ = parse_args()
    main(args)
| 279 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
    """Fixture helper building ViT-style image-processor kwargs and dimensions.

    Restored from the scrambled original, whose ``__init__`` repeated one
    parameter name (a SyntaxError as written) and bound every value to a dead
    local instead of ``self``; the method name is restored to the one the test
    class below actually calls.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],  # mutable default kept for interface fidelity
        image_std=[0.5, 0.5, 0.5],
    ):
        # Default target size when the caller does not supply one.
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Kwargs for constructing the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


EfficientFormerImageProcessorTester = __lowerCAmelCase  # descriptive alias read elsewhere in the file
@require_torch
@require_vision
class __lowerCAmelCase ( _a, unittest.TestCase ):
    '''Unit tests for the image processor (PIL / numpy / torch input paths).

    NOTE(review): scrambling artifacts — ``_a`` was originally the
    image-processing-test mixin; every method shares the name
    ``lowerCamelCase`` (only the last survives), and the locals bound to
    ``snake_case_`` are read back under their original names
    (``image_processor`` / ``image_inputs`` / ``encoded_images``), which are
    unbound as written. ``EfficientFormerImageProcessorTester`` is the
    original name of the helper class defined above.
    '''

    lowerCamelCase_ : str = ViTImageProcessor if is_vision_available() else None

    def lowerCamelCase (self ) -> List[Any]:
        '''setUp: build the fixture helper (originally ``self.image_proc_tester``).'''
        snake_case_ : List[str] = EfficientFormerImageProcessorTester(self )

    @property
    def lowerCamelCase (self ) -> Optional[int]:
        '''Kwargs for constructing the image processor under test.'''
        return self.image_proc_tester.prepare_image_processor_dict()

    def lowerCamelCase (self ) -> Any:
        '''The processor exposes the expected configuration attributes.'''
        snake_case_ : List[str] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__magic_name__ , '''image_mean''' ) )
        self.assertTrue(hasattr(__magic_name__ , '''image_std''' ) )
        self.assertTrue(hasattr(__magic_name__ , '''do_normalize''' ) )
        self.assertTrue(hasattr(__magic_name__ , '''do_resize''' ) )
        self.assertTrue(hasattr(__magic_name__ , '''size''' ) )

    def lowerCamelCase (self ) -> Union[str, Any]:
        '''Intentionally empty placeholder (mirrors the upstream test).'''
        pass

    def lowerCamelCase (self ) -> str:
        '''PIL inputs: single and batched pixel-value shapes.'''
        snake_case_ : List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        snake_case_ : str = prepare_image_inputs(self.image_proc_tester , equal_resolution=__magic_name__ )
        for image in image_inputs:
            self.assertIsInstance(__magic_name__ , Image.Image )
        # Test not batched input
        snake_case_ : List[Any] = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ) , )
        # Test batched
        snake_case_ : Any = image_processor(__magic_name__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ) , )

    def lowerCamelCase (self ) -> Any:
        '''numpy inputs: single and batched pixel-value shapes.'''
        snake_case_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        snake_case_ : List[str] = prepare_image_inputs(self.image_proc_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ )
        for image in image_inputs:
            self.assertIsInstance(__magic_name__ , np.ndarray )
        # Test not batched input
        snake_case_ : Any = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ) , )
        # Test batched
        snake_case_ : List[str] = image_processor(__magic_name__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ) , )

    def lowerCamelCase (self ) -> str:
        '''torch inputs: single and batched pixel-value shapes.'''
        snake_case_ : int = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        snake_case_ : int = prepare_image_inputs(self.image_proc_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
        for image in image_inputs:
            self.assertIsInstance(__magic_name__ , torch.Tensor )
        # Test not batched input
        snake_case_ : Optional[int] = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ) , )
        # Test batched
        snake_case_ : int = image_processor(__magic_name__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ) , )
| 279 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __lowerCAmelCase ( unittest.TestCase ):
    """Pipeline tests for video classification.

    Fixes vs. previous revision: ``get_test_pipeline`` declared three parameters
    all named ``__magic_name__`` (a SyntaxError), every method shared the same
    name (shadowing), and the locals holding the example video path were never
    bound under the names that referenced them.
    """

    # Mapping consumed by the shared pipeline-test machinery.
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a classifier plus example inputs for the common pipeline tests."""
        example_video_filepath = hf_hub_download(
            repo_id='''nateraw/video-demo''', filename='''archery.mp4''', repo_type='''dataset''')
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            '''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        """Each example yields top_k=2 (score, label) dicts of the right types."""
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {'''score''': ANY(float), '''label''': ANY(str)},
                    {'''score''': ANY(float), '''label''': ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        """Tiny VideoMAE checkpoint produces stable scores for single and batched calls."""
        small_model = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={'''shortest_edge''': 10}, crop_size={'''height''': 10, '''width''': 10})
        video_classifier = pipeline(
            '''video-classification''', model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4)

        video_file_path = hf_hub_download(repo_id='''nateraw/video-demo''', filename='''archery.mp4''', repo_type='''dataset''')
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
                [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        # No tiny TF checkpoint is available for this pipeline yet.
        pass
| 279 | 1 |
import os

from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home

# Shared constants for saving/loading diffusers models.
# Fixes vs. previous revision: every constant was bound to the same rebinding
# name and `default_cache_path` was referenced without ever being defined
# (NameError at import time).
default_cache_path = HUGGINGFACE_HUB_CACHE

# Canonical file names inside a model repo/directory.
CONFIG_NAME = '''config.json'''
WEIGHTS_NAME = '''diffusion_pytorch_model.bin'''
FLAX_WEIGHTS_NAME = '''diffusion_flax_model.msgpack'''
ONNX_WEIGHTS_NAME = '''model.onnx'''
SAFETENSORS_WEIGHTS_NAME = '''diffusion_pytorch_model.safetensors'''
ONNX_EXTERNAL_WEIGHTS_NAME = '''weights.pb'''

HUGGINGFACE_CO_RESOLVE_ENDPOINT = '''https://huggingface.co'''
# On-disk cache for downloaded models (shared with huggingface_hub).
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = '''diffusers_modules'''
HF_MODULES_CACHE = os.getenv('''HF_MODULES_CACHE''', os.path.join(hf_cache_home, '''modules'''))
# Revision names kept only for backward compatibility.
DEPRECATED_REVISION_ARGS = ['''fp16''', '''non-ema''']
# Attribute path of the attention module inside CLIP text-encoder layers.
TEXT_ENCODER_ATTN_MODULE = '''.self_attn'''
| 279 |
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return `number`'s multiplication table with `number_of_terms` lines.

    Each line has the form ``number * i = product`` for i = 1..number_of_terms.
    Fix vs. previous revision: the function was defined as ``lowerCamelCase_``
    while the ``__main__`` block called ``multiplication_table`` (NameError).
    """
    return "\n".join(
        f'''{number} * {i} = {number * i}''' for i in range(1, number_of_terms + 1)
    )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=1_0))
| 279 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
# Module logger; previously this binding was immediately clobbered because the
# archive map below was assigned to the same name.
logger = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config file.
PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __lowerCAmelCase ( _a ):
    """Configuration for Perceiver models.

    Fixes vs. previous revision: ``__init__`` declared 25 parameters all named
    ``__magic_name__`` (a SyntaxError) and every assignment bound a throwaway
    local instead of an attribute on ``self``. Parameter names are recovered
    from the right-hand sides of the original assignments.
    """

    # NOTE(review): base class `_a` is presumably PretrainedConfig (imported
    # above), which reads `model_type` — confirm against the original import.
    model_type = '''perceiver'''

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ) -> Any:
        """Store all hyper-parameters on the config instance."""
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class __lowerCAmelCase ( _a ):
    """ONNX export configuration for Perceiver.

    Fixes vs. previous revision: ``generate_dummy_inputs`` declared several
    parameters all named ``__magic_name__`` (a SyntaxError), all three members
    shared one method name (shadowing), and the locals (``dynamic_axis``,
    ``batch_size``, ``seq_length``, ``dummy_input``, ``inputs``) were never
    bound under the names that referenced them.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the exported model's inputs."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''inputs''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance when validating ONNX outputs against PyTorch."""
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework=None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        """Build dummy text or image inputs, renaming the model input to ``inputs``."""
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [''' '''.join(['''a''']) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            # Perceiver expects its text input under the key "inputs".
            inputs['''inputs'''] = inputs.pop('''input_ids''')
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            # Perceiver expects its image input under the key "inputs".
            inputs['''inputs'''] = inputs.pop('''pixel_values''')
            return inputs
        else:
            raise ValueError(
                '''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''')
| 279 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
lowerCAmelCase_ = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def remove_ignore_keys_(state_dict):
    """Drop the top-level bookkeeping entries from an OpenAI checkpoint, in place.

    Missing keys are ignored (``pop`` with a ``None`` default).
    Fixes vs. previous revision: the parameter was never bound to the name
    ``state_dict`` that the body used, and ``pop`` received the wrong arguments.
    The name matches the caller in the conversion entry point.
    """
    ignore_keys = ['''layers''', '''blocks''']
    for k in ignore_keys:
        state_dict.pop(k, None)
# Substring renames from OpenAI Whisper parameter names to HF Transformers
# names. Renamed from the obfuscated binding: `rename_keys` iterates this map
# as `WHISPER_MAPPING.items()`.
WHISPER_MAPPING = {
    '''blocks''': '''layers''',
    '''mlp.0''': '''fc1''',
    '''mlp.2''': '''fc2''',
    '''mlp_ln''': '''final_layer_norm''',
    '''.attn.query''': '''.self_attn.q_proj''',
    '''.attn.key''': '''.self_attn.k_proj''',
    '''.attn.value''': '''.self_attn.v_proj''',
    '''.attn_ln''': '''.self_attn_layer_norm''',
    '''.attn.out''': '''.self_attn.out_proj''',
    '''.cross_attn.query''': '''.encoder_attn.q_proj''',
    '''.cross_attn.key''': '''.encoder_attn.k_proj''',
    '''.cross_attn.value''': '''.encoder_attn.v_proj''',
    '''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
    '''.cross_attn.out''': '''.encoder_attn.out_proj''',
    '''decoder.ln.''': '''decoder.layer_norm.''',
    '''encoder.ln.''': '''encoder.layer_norm.''',
    '''token_embedding''': '''embed_tokens''',
    '''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
    '''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
    '''ln_post''': '''layer_norm''',
}
def rename_keys(s_dict):
    """Rewrite OpenAI parameter names to HF names using WHISPER_MAPPING, in place.

    Every mapping key found as a substring of a state-dict key is replaced; the
    tensor is re-inserted under the rewritten name. Returns the mutated dict.
    Fixes vs. previous revision: ``new_key`` was never bound (NameError) and the
    popped value was discarded instead of stored under the new key.
    """
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f'''{key} -> {new_key}''')

        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    """Build a bias-free ``nn.Linear`` that shares `emb`'s weight tensor (weight tying).

    The layer is sized from the embedding's (vocab_size, emb_size) weight and its
    ``.weight.data`` is replaced by the embedding's, so the projection maps hidden
    states back to vocabulary logits.
    Fixes vs. previous revision: ``nn.Linear`` was called with the embedding
    module itself as both dimensions and as ``bias``.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = os.path.join(os.path.expanduser("~"), ".cache", "whisper")) -> bytes:
    """Download `url` into `root` and return the file's bytes, verifying its SHA-256.

    The expected digest is taken from the second-to-last URL path segment (see
    the `_MODELS` URLs). A cached file with a matching digest is reused; a
    mismatching cache entry triggers a re-download.
    Fixes vs. previous revision: nonexistent ``hashlib.shaaaa`` attribute, the
    URL was hashed instead of the file contents, several locals were never
    bound, and the caller passes only the URL — so `root` now has a default.
    # NOTE(review): default cache dir chosen to mirror OpenAI's whisper layout — confirm.
    """
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split('''/''')[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f'''{download_target} exists and is not a regular file''')

    if os.path.isfile(download_target):
        model_bytes = open(download_target, '''rb''').read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''')

    with urllib.request.urlopen(url) as source, open(download_target, '''wb''') as output:
        with tqdm(
            total=int(source.info().get('''Content-Length''')), ncols=80, unit='''iB''', unit_scale=True, unit_divisor=1_024
        ) as loop:
            while True:
                buffer = source.read(8_192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, '''rb''').read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            '''Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.''')

    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Convert an OpenAI Whisper checkpoint to a HF WhisperForConditionalGeneration.

    `checkpoint_path` is either a model-size key of `_MODELS` (downloaded) or a
    local ``.pt`` file; the converted model is saved to `pytorch_dump_folder_path`.
    Fixes vs. previous revision: ``tie_embeds``/``proj_out_weights``/``missing``
    were never bound under those names (NameError), the projection layer built
    by ``make_linear_from_emb`` was discarded into a local, and
    ``decoder_attention_heads`` was fed ``n_text_state`` (a hidden size) instead
    of the head count ``n_text_head``.
    """
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location='''cpu''')
    dimensions = original_checkpoint['''dims''']
    state_dict = original_checkpoint['''model_state_dict''']
    # Kept before key-renaming so the untied projection can be restored below.
    proj_out_weights = state_dict['''decoder.token_embedding.weight''']
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict['''decoder.layers.0.fc1.weight'''].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions['''n_vocab'''],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions['''n_mels'''],
        d_model=dimensions['''n_audio_state'''],
        max_target_positions=dimensions['''n_text_ctx'''],
        encoder_layers=dimensions['''n_audio_layer'''],
        encoder_attention_heads=dimensions['''n_audio_head'''],
        decoder_layers=dimensions['''n_text_layer'''],
        decoder_attention_heads=dimensions['''n_text_head'''],  # was n_text_state: a width, not a head count
        max_source_positions=dimensions['''n_audio_ctx'''],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
            f''' but all the following weights are missing {missing}''')

    if tie_embeds:
        # Tie the output projection to the decoder token embeddings.
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse the checkpoint location and the output folder.
    # Fixes vs. previous revision: the parser and parsed args were bound to a
    # throwaway name, so `parser`/`args` were undefined (NameError).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''--checkpoint_path''', type=str, help='''Path to the downloaded checkpoints''')
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 279 | 1 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
# Bort conversion was written against exact Gluon/MXNet releases; bail out early
# on any other version.
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
    raise Exception('''requires gluonnlp == 0.8.3''')

if version.parse(mx.__version__) != version.parse('''1.5.0'''):
    raise Exception('''requires mxnet == 1.5.0''')

logging.set_verbosity_info()
# Previously the logger binding was immediately clobbered by the sample text
# because both were assigned to the same obfuscated name.
logger = logging.get_logger(__name__)

# Sentence used to sanity-check the converted model against the original.
SAMPLE_TEXT = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Convert a pre-trained Bort (Gluon/MXNet) checkpoint to HF BertForMaskedLM.

    Rebuilds the original Gluon Bort model, copies every parameter into a
    freshly-initialized HF model, saves it to `pytorch_dump_folder_path`, and
    sanity-checks both models' outputs on a sample sentence.
    Fixes vs. previous revision: essentially every local (``predefined_args``,
    ``params``, ``hf_bort_model``, ``tokenizer``, …) was bound to a throwaway
    name while the body referenced the originals (NameError on first call), and
    the entry point's name did not match the ``__main__`` caller.
    """
    # Hyper-parameters of the published bort_4_8_768_1024 checkpoint.
    bort_4_8_768_1024_hparams = {
        '''attention_cell''': '''multi_head''',
        '''num_layers''': 4,
        '''units''': 1_024,
        '''hidden_size''': 768,
        '''max_length''': 512,
        '''num_heads''': 8,
        '''scaled''': True,
        '''dropout''': 0.1,
        '''use_residual''': True,
        '''embed_size''': 1_024,
        '''embed_dropout''': 0.1,
        '''word_embed''': None,
        '''layer_norm_eps''': 1E-5,
        '''token_type_vocab_size''': 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args['''attention_cell'''],
        num_layers=predefined_args['''num_layers'''],
        units=predefined_args['''units'''],
        hidden_size=predefined_args['''hidden_size'''],
        max_length=predefined_args['''max_length'''],
        num_heads=predefined_args['''num_heads'''],
        scaled=predefined_args['''scaled'''],
        dropout=predefined_args['''dropout'''],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args['''use_residual'''],
        activation=predefined_args.get('''activation''', '''gelu'''),
        layer_norm_eps=predefined_args.get('''layer_norm_eps''', None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = '''openwebtext_ccnews_stories_books_cased'''

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), '''models''')
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args['''units'''],
        embed_size=predefined_args['''embed_size'''],
        embed_dropout=predefined_args['''embed_dropout'''],
        word_embed=predefined_args['''word_embed'''],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args['''token_type_vocab_size'''],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        '''architectures''': ['''BertForMaskedLM'''],
        '''attention_probs_dropout_prob''': predefined_args['''dropout'''],
        '''hidden_act''': '''gelu''',
        '''hidden_dropout_prob''': predefined_args['''dropout'''],
        '''hidden_size''': predefined_args['''embed_size'''],
        '''initializer_range''': 0.02,
        '''intermediate_size''': predefined_args['''hidden_size'''],
        '''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
        '''max_position_embeddings''': predefined_args['''max_length'''],
        '''model_type''': '''bort''',
        '''num_attention_heads''': predefined_args['''num_heads'''],
        '''num_hidden_layers''': predefined_args['''num_layers'''],
        '''pad_token_id''': 1,  # 2 = BERT, 1 = RoBERTa
        '''type_vocab_size''': 1,  # 2 = BERT, 1 = RoBERTa
        '''vocab_size''': len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()

    # Parameter mapping (Gluonnlp -> Transformers); `*` denotes the layer index.
    # e.g. `encoder.transformer_cells.*.attention_cell.proj_key.weight`
    #   -> `bert.encoder.layer.*.attention.self.key.weight`; see check_and_map_params
    # calls below for the complete table.

    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, '''word_embed.0.weight''')
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, '''encoder.position_weight''')
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, '''encoder.layer_norm.beta''')
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, '''encoder.layer_norm.gamma''')

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data)

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''')
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''')
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''')
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''')
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''')
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''')

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f'''encoder.transformer_cells.{i}.proj.bias''')
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f'''encoder.transformer_cells.{i}.proj.weight''')
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f'''encoder.transformer_cells.{i}.layer_norm.beta''')
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f'''encoder.transformer_cells.{i}.layer_norm.gamma''')

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''')
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''')

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''')
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''')
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''')
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''')

    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained('''roberta-base''')
    # Kept local so this function does not depend on a module-level constant.
    sample_text = '''The Nymphenburg Palace is a beautiful palace in Munich!'''

    input_ids = tokenizer.encode_plus(sample_text)['''input_ids''']

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(sample_text, return_tensors='''pt''')
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1E-3)

    if success:
        print('''✔️ Both model do output the same tensors''')
    else:
        print('''❌ Both model do **NOT** output the same tensors''')
        print('''Absolute difference is:''', max_absolute_diff)
if __name__ == "__main__":
    # CLI entry point: both the checkpoint path and the output folder are required.
    # Fixes vs. previous revision: the parser and parsed args were bound to a
    # throwaway name, so `parser`/`args` were undefined (NameError).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 279 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
lowerCAmelCase_ = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
lowerCAmelCase_ = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
lowerCAmelCase_ = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
lowerCAmelCase_ = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
lowerCAmelCase_ = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]),
('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
lowerCAmelCase_ = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
lowerCAmelCase_ = (
('''JH AH TH KH QH''', 2_3),
('''JH 9H TH KH QH''', 2_2),
('''JC KH JS JD JH''', 2_1),
('''KH KC 3S 3H 3D''', 2_0),
('''8C 9C 5C 3C TC''', 1_9),
('''JS QS 9H TS KH''', 1_8),
('''7C 7S KH 2H 7H''', 1_7),
('''3C KH 5D 5S KH''', 1_6),
('''QH 8H KD JH 8S''', 1_5),
('''2D 6D 9D TH 7D''', 1_4),
)
def lowerCamelCase_ ( ) -> Dict:
"""simple docstring"""
snake_case_ , snake_case_ : Any = randrange(len(_UpperCamelCase ) ), randrange(len(_UpperCamelCase ) )
snake_case_ : Any = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)]
snake_case_ , snake_case_ : Tuple = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def lowerCamelCase_ ( _UpperCamelCase = 100 ) -> str:
"""simple docstring"""
return (generate_random_hand() for _ in range(_UpperCamelCase ))
@pytest.mark.parametrize('''hand, expected''' , _UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> int:
"""simple docstring"""
assert PokerHand(_UpperCamelCase )._is_flush() == expected
@pytest.mark.parametrize('''hand, expected''' , _UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
assert PokerHand(_UpperCamelCase )._is_straight() == expected
@pytest.mark.parametrize('''hand, expected, card_values''' , _UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Tuple:
"""simple docstring"""
snake_case_ : str = PokerHand(_UpperCamelCase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('''hand, expected''' , _UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> int:
"""simple docstring"""
assert PokerHand(_UpperCamelCase )._is_same_kind() == expected
@pytest.mark.parametrize('''hand, expected''' , _UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
assert PokerHand(_UpperCamelCase )._hand_type == expected
@pytest.mark.parametrize('''hand, other, expected''' , _UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
"""simple docstring"""
assert PokerHand(_UpperCamelCase ).compare_with(PokerHand(_UpperCamelCase ) ) == expected
@pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]:
"""simple docstring"""
assert PokerHand(_UpperCamelCase ).compare_with(PokerHand(_UpperCamelCase ) ) == expected
def lowerCamelCase_ ( ) -> str:
"""simple docstring"""
snake_case_ : Dict = [PokerHand(_UpperCamelCase ) for hand in SORTED_HANDS]
snake_case_ : str = poker_hands.copy()
shuffle(_UpperCamelCase )
snake_case_ : List[str] = chain(sorted(_UpperCamelCase ) )
for index, hand in enumerate(_UpperCamelCase ):
assert hand == poker_hands[index]
def lowerCamelCase_ ( ) -> Dict:
"""simple docstring"""
snake_case_ : Union[str, Any] = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )]
pokerhands.sort(reverse=_UpperCamelCase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def lowerCamelCase_ ( ) -> str:
"""simple docstring"""
snake_case_ : Dict = PokerHand('''2C 4S AS 3D 5C''' )
snake_case_ : str = True
snake_case_ : Tuple = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def lowerCamelCase_ ( ) -> List[str]:
"""simple docstring"""
snake_case_ : List[str] = 0
snake_case_ : Union[str, Any] = os.path.abspath(os.path.dirname(_UpperCamelCase ) )
snake_case_ : Dict = os.path.join(_UpperCamelCase , '''poker_hands.txt''' )
with open(_UpperCamelCase ) as file_hand:
for line in file_hand:
snake_case_ : Dict = line[:14].strip()
snake_case_ : List[str] = line[15:].strip()
snake_case_ , snake_case_ : str = PokerHand(_UpperCamelCase ), PokerHand(_UpperCamelCase )
snake_case_ : int = player.compare_with(_UpperCamelCase )
if output == "Win":
answer += 1
assert answer == 376
| 279 | 1 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    """Split a dataset mapping (e.g. sklearn's load_iris() Bunch) into (features, targets).

    Fix: the original took a mangled parameter name but indexed ``data``
    (NameError); the function name is restored from its call site in main().
    """
    return (data["data"], data["target"])
def xgboost(features, target) -> XGBClassifier:
    """Fit and return an XGBoost classifier on (features, target).

    Fix: the original signature declared the same mangled parameter name
    twice (SyntaxError); the function name is restored from its call site
    in main().
    """
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier
def main() -> None:
    """Train an XGBoost classifier on the iris dataset and show a normalized
    confusion matrix for the held-out test split.

    Fix: every local was bound to a mangled name but read under its real name
    (``iris``, etc. — NameError); ``main`` is restored from the call in the
    ``__main__`` guard below.
    """
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)
    names = iris['''target_names''']
    # Create an XGBoost Classifier from the training data
    xgb_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgb_classifier, x_test, y_test, display_labels=names, cmap='''Blues''', normalize='''true''', )
    plt.title('''Normalized Confusion Matrix - IRIS Dataset''')
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
| 279 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union

from filelock import FileLock

from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
lowerCAmelCase_ = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification.

    NOTE(review): the original declared all three fields under one mangled
    name (so only one field survived) and the class name itself was mangled;
    both are restored from the `List[InputExample]` annotation and the
    `example.guid` / `example.words` / `example.labels` accesses below.
    """

    guid: str  # unique example id, used only for logging
    words: List[str]  # whitespace-split input tokens
    labels: Optional[List[str]]  # per-word labels; may be None at predict time
@dataclass
class InputFeatures:
    """Model-ready features for one example; all lists are max_seq_length long.

    NOTE(review): field and class names restored from the keyword arguments
    used when this class is constructed below (`InputFeatures(input_ids=...,
    attention_mask=..., token_type_ids=..., label_ids=...)`).
    """

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None  # None for models without segment ids
    label_ids: Optional[List[int]] = None
class Split(Enum):
    """Dataset split identifiers.

    NOTE(review): the class name is restored from the `Split.train` default
    and `mode.value` uses below; the base was a mangled import — the
    `.value` access implies it was `enum.Enum`.
    """

    train = '''train'''
    dev = '''dev'''
    test = '''test'''
class TokenClassificationTask:
    """Abstract token-classification task: reading example files and turning
    examples into padded model features.

    NOTE(review): the mangled original bound all three methods to one name
    (duplicate parameters even made it a SyntaxError); method names are
    restored from the call sites in the dataset classes below
    (`read_examples_from_file`, `convert_examples_to_features`); `get_labels`
    is reconstructed — it is not referenced in this file.
    """

    @staticmethod
    def read_examples_from_file(data_dir, mode) -> List[InputExample]:
        """Read the examples for `mode` ("train"/"dev"/"test") from `data_dir`."""
        raise NotImplementedError

    @staticmethod
    def get_labels(path) -> List[str]:
        """Return the list of label strings for the task."""
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples,
        label_list,
        max_seq_length,
        tokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        """Tokenize each example, align word labels to sub-tokens and pad to
        `max_seq_length`.

        `cls_token_at_end` selects xlnet-style ([CLS] appended) vs bert-style
        ([CLS] prepended); `pad_on_left` selects left padding (xlnet).
        Sub-tokens after the first token of a word get `pad_token_label_id`
        so the loss ignores them.
        """
        label_map = {label: i for i, label in enumerate(label_list)}
        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 1_0000 == 0:
                logger.info('''Writing example %d of %d''', ex_index, len(examples))
            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)
                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
            # [SEP] terminates the (single) sequence; segment_ids mark sequence A.
            # The [SEP] token unambiguously separates sequences for the model.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)
            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length
            if ex_index < 5:
                logger.info('''*** Example ***''')
                logger.info('''guid: %s''', example.guid)
                logger.info('''tokens: %s''', ''' '''.join([str(x) for x in tokens]))
                logger.info('''input_ids: %s''', ''' '''.join([str(x) for x in input_ids]))
                logger.info('''input_mask: %s''', ''' '''.join([str(x) for x in input_mask]))
                logger.info('''segment_ids: %s''', ''' '''.join([str(x) for x in segment_ids]))
                logger.info('''label_ids: %s''', ''' '''.join([str(x) for x in label_ids]))
            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None
            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class TokenClassificationDataset(Dataset):
    """Torch dataset that featurizes examples once and caches them on disk.

    NOTE(review): class/attribute/parameter names restored from usage
    (`self.features`, `self.pad_token_label_id`, the cache-file format
    string); the mangled original had duplicate `__init__` parameters
    (SyntaxError). The base was a mangled name — `torch.utils.data.Dataset`
    is imported directly above and matches the __len__/__getitem__ protocol.
    """

    features: List[InputFeatures]
    # CrossEntropyLoss ignores this label id, so padded positions carry it.
    pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

    def __init__(
        self,
        token_classification_task,
        data_dir,
        tokenizer,
        labels,
        model_type,
        max_seq_length=None,
        overwrite_cache=False,
        mode=Split.train,
    ):
        # Cache path is keyed on split, tokenizer class and sequence length.
        cached_features_file = os.path.join(
            data_dir, '''cached_{}_{}_{}'''.format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
        )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '''.lock'''
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                logger.info(f'''Loading features from cached file {cached_features_file}''')
                self.features = torch.load(cached_features_file)
            else:
                logger.info(f'''Creating features from dataset file at {data_dir}''')
                examples = token_classification_task.read_examples_from_file(data_dir, mode)
                # TODO clean up all this to leverage built-in features of tokenizers
                self.features = token_classification_task.convert_examples_to_features(
                    examples,
                    labels,
                    max_seq_length,
                    tokenizer,
                    cls_token_at_end=bool(model_type in ['''xlnet''']),
                    cls_token=tokenizer.cls_token,
                    cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0,
                    sep_token=tokenizer.sep_token,
                    sep_token_extra=False,  # NOTE(review): mangled away; upstream passes False — confirm
                    pad_on_left=bool(tokenizer.padding_side == '''left'''),
                    pad_token=tokenizer.pad_token_id,
                    pad_token_segment_id=tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info(f'''Saving features into cached file {cached_features_file}''')
                torch.save(self.features, cached_features_file)

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]
if is_tf_available():
import tensorflow as tf
class TFTokenClassificationDataset:
    """TensorFlow counterpart of the dataset above, exposing a tf.data.Dataset.

    NOTE(review): class/attribute/parameter names restored from usage; the
    mangled original had duplicate `__init__` parameters (SyntaxError) and
    never bound `self.dataset` (used by the getter below).
    """

    features: List[InputFeatures]
    pad_token_label_id: int = -100

    def __init__(
        self,
        token_classification_task,
        data_dir,
        tokenizer,
        labels,
        model_type,
        max_seq_length=None,
        overwrite_cache=False,
        mode=Split.train,
    ):
        examples = token_classification_task.read_examples_from_file(data_dir, mode)
        # TODO clean up all this to leverage built-in features of tokenizers
        self.features = token_classification_task.convert_examples_to_features(
            examples,
            labels,
            max_seq_length,
            tokenizer,
            cls_token_at_end=bool(model_type in ['''xlnet''']),
            cls_token=tokenizer.cls_token,
            cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0,
            sep_token=tokenizer.sep_token,
            sep_token_extra=False,  # NOTE(review): mangled away; upstream passes False — confirm
            pad_on_left=bool(tokenizer.padding_side == '''left'''),
            pad_token=tokenizer.pad_token_id,
            pad_token_segment_id=tokenizer.pad_token_type_id,
            pad_token_label_id=self.pad_token_label_id,
        )

        def gen():
            # Yield (inputs, labels) pairs; drop token_type_ids when absent.
            for ex in self.features:
                if ex.token_type_ids is None:
                    yield (
                        {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                        ex.label_ids,
                    )
                else:
                    yield (
                        {
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label_ids,
                    )

        # NOTE(review): the dtype suffixes were mangled (`tf.intaa`); int32
        # inputs / int64 labels follow the upstream module — confirm.
        if "token_type_ids" not in tokenizer.model_input_names:
            self.dataset = tf.data.Dataset.from_generator(
                gen,
                ({'''input_ids''': tf.int32, '''attention_mask''': tf.int32}, tf.int64),
                (
                    {'''input_ids''': tf.TensorShape([None]), '''attention_mask''': tf.TensorShape([None])},
                    tf.TensorShape([None]),
                ),
            )
        else:
            self.dataset = tf.data.Dataset.from_generator(
                gen,
                ({'''input_ids''': tf.int32, '''attention_mask''': tf.int32, '''token_type_ids''': tf.int32}, tf.int64),
                (
                    {
                        '''input_ids''': tf.TensorShape([None]),
                        '''attention_mask''': tf.TensorShape([None]),
                        '''token_type_ids''': tf.TensorShape([None]),
                    },
                    tf.TensorShape([None]),
                ),
            )

    def get_dataset(self):
        """Assert cardinality and return the tf.data.Dataset.

        NOTE(review): method name reconstructed (not called in this file).
        """
        self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
        return self.dataset

    def __len__(self) -> int:
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]
| 279 | 1 |
from math import log2


def lowerCamelCase_(a) -> int:
    """Return the 0-based position of the rightmost set bit of `a` (0 if a == 0).

    Equivalently: the exponent of the largest power of two dividing `a`.
    Fixes: `math.loga` does not exist (mangled `log2`), the parameter name
    did not match the body (`a`), and the isinstance guard was inverted so
    every valid int raised TypeError.

    Raises:
        ValueError: if `a` is negative.
        TypeError: if `a` is not an int.
    """
    if a < 0:
        raise ValueError('''Input value must be a positive integer''')
    elif not isinstance(a, int):
        raise TypeError('''Input value must be a \'int\' type''')
    # a & -a isolates the lowest set bit; log2 gives its bit position.
    return 0 if (a == 0) else int(log2(a & -a))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 279 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( _a, unittest.TestCase ):
    # NOTE(review): these three mixin settings were all mangled onto one name,
    # so only the last assignment survives at class level; upstream they are
    # distinct attributes (tokenizer class / rust-tokenizer flag /
    # sentencepiece flag) — confirm against the shared tokenizer test mixin.
    lowerCamelCase_ : Optional[int] = SpeechTaTokenizer
    lowerCamelCase_ : int = False
    lowerCamelCase_ : Dict = True
def setUp(self) -> None:
    """Build a SpeechT5 tokenizer from the SentencePiece fixture and save it
    to the temp dir used by the rest of the suite.

    NOTE(review): name restored to ``setUp`` (it calls ``super().setUp()``
    and otherwise never runs); the AddedToken strip flags and the
    ``tokenizer.mask_token`` assignment were mangled away — values follow
    the upstream test, confirm.
    """
    super().setUp()
    # We have a SentencePiece fixture for testing
    tokenizer = SpeechTaTokenizer(lowerCAmelCase_)
    mask_token = AddedToken('''<mask>''', lstrip=True, rstrip=False)
    tokenizer.mask_token = mask_token
    tokenizer.add_special_tokens({'''mask_token''': mask_token})
    tokenizer.add_tokens(['''<ctc_blank>'''])
    tokenizer.save_pretrained(self.tmpdirname)
def get_input_output_texts(self, tokenizer):
    """Return an (input_text, expected_output_text) pair for round-trips.

    Fix: the locals were bound to mangled names but returned as
    ``input_text, output_text`` (NameError); the method name is grounded by
    the ``self.get_input_output_texts`` call in get_clean_sequence below.
    """
    input_text = '''this is a test'''
    output_text = '''this is a test'''
    return input_text, output_text
def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
    """Encode/decode the sample text; return (decoded_text, token_ids).

    NOTE(review): the mangled signature repeated one parameter name
    (SyntaxError); names, and the two False flag values, follow the
    upstream tokenizer-test mixin — confirm.
    """
    input_text, output_text = self.get_input_output_texts(tokenizer)
    ids = tokenizer.encode(output_text, add_special_tokens=False)
    text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
    return text, ids
def test_convert_token_and_id(self):
    """`<pad>` maps to id 1 and back.

    Fix: locals were bound to mangled names, and the mangled method name
    (no `test_` prefix) meant the test was never collected.
    """
    token = '''<pad>'''
    token_id = 1
    self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
    self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def test_get_vocab(self):
    """Spot-check ordering and total size of the vocab keys.

    Fix: `vocab_keys` was referenced but bound to a mangled name (NameError),
    and the mangled method name meant the test was never collected.
    """
    vocab_keys = list(self.get_tokenizer().get_vocab().keys())
    self.assertEqual(vocab_keys[0], '''<s>''')
    self.assertEqual(vocab_keys[1], '''<pad>''')
    self.assertEqual(vocab_keys[-4], '''œ''')
    self.assertEqual(vocab_keys[-2], '''<mask>''')
    self.assertEqual(vocab_keys[-1], '''<ctc_blank>''')
    self.assertEqual(len(vocab_keys), 81)
def test_vocab_size(self):
    """Base vocab size (before added tokens) of the fixture tokenizer.

    Fix: renamed so pytest/unittest actually collects the test.
    """
    self.assertEqual(self.get_tokenizer().vocab_size, 79)
def test_add_tokens_tokenizer(self):
    """Adding regular and special tokens grows len(tokenizer) but not
    vocab_size, and new tokens encode to ids past the base vocab.

    NOTE(review): locals were mangled onto two names (NameError) and the
    method name lacked the `test_` prefix; names and the
    ``do_lower_case=False`` argument follow the upstream mixin — confirm.
    """
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'''{tokenizer.__class__.__name__}'''):
            vocab_size = tokenizer.vocab_size
            all_size = len(tokenizer)
            self.assertNotEqual(vocab_size, 0)
            # We usually have added tokens from the start in tests because our vocab fixtures are
            # smaller than the original vocabs - let's not assert this
            # self.assertEqual(vocab_size, all_size)
            new_toks = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
            added_toks = tokenizer.add_tokens(new_toks)
            vocab_size_a = tokenizer.vocab_size
            all_size_a = len(tokenizer)
            self.assertNotEqual(vocab_size_a, 0)
            self.assertEqual(vocab_size, vocab_size_a)
            self.assertEqual(added_toks, len(new_toks))
            self.assertEqual(all_size_a, all_size + len(new_toks))
            tokens = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''', add_special_tokens=False)
            self.assertGreaterEqual(len(tokens), 4)
            self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
            self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
            new_toks_a = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
            added_toks_a = tokenizer.add_special_tokens(new_toks_a)
            vocab_size_a = tokenizer.vocab_size
            all_size_a_after = len(tokenizer)
            self.assertNotEqual(vocab_size_a, 0)
            self.assertEqual(vocab_size, vocab_size_a)
            self.assertEqual(added_toks_a, len(new_toks_a))
            self.assertEqual(all_size_a_after, all_size_a + len(new_toks_a))
            tokens = tokenizer.encode(
                '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''', add_special_tokens=False)
            self.assertGreaterEqual(len(tokens), 6)
            self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
            self.assertGreater(tokens[0], tokens[1])
            self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
            self.assertGreater(tokens[-3], tokens[-4])
            self.assertEqual(tokens[0], tokenizer.eos_token_id)
            self.assertEqual(tokens[-3], tokenizer.pad_token_id)
def lowerCamelCase (self ) -> Union[str, Any]:
    # Intentionally a no-op — presumably overrides an inherited mixin test
    # that does not apply to this tokenizer; TODO confirm which one
    # (the mangled name hides the original).
    '''simple docstring'''
    pass
def lowerCamelCase (self ) -> List[str]:
    # Intentionally a no-op — presumably overrides a second inherited mixin
    # test that does not apply here; TODO confirm which one.
    '''simple docstring'''
    pass
def test_full_tokenizer(self):
    """Character-level tokenization and id round-trips on two sentences;
    the out-of-vocab token '92000' must come back as '<unk>'.

    Fix: locals were mangled (NameError on `tokens`/`ids`) and the mangled
    method name meant the test was never collected.
    """
    tokenizer = self.get_tokenizer()
    tokens = tokenizer.tokenize('''This is a test''')
    # fmt: off
    self.assertListEqual(tokens, [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] )
    # fmt: on
    self.assertListEqual(
        tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] )
    tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
    self.assertListEqual(
        tokens, [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
    ids = tokenizer.convert_tokens_to_ids(tokens)
    # fmt: off
    self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
    # fmt: on
    back_tokens = tokenizer.convert_ids_to_tokens(ids)
    self.assertListEqual(
        back_tokens, [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
@slow
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : Tuple = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
snake_case_ : List[Any] = {
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=__magic_name__ , )
| 279 | 1 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Test suite for the slow (``CLIPTokenizer``) and fast (``CLIPTokenizerFast``) tokenizers.

    The original chunk had every method named identically (so only the last
    survived) and referenced locals that were never defined; names are
    restored from their use sites and the TokenizerTesterMixin contract.
    """

    # Attribute names below are part of the TokenizerTesterMixin contract
    # (see `self.tokenizer_class` / `self.rust_tokenizer_class` uses further down).
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    # NOTE(review): flag name reconstructed from the mixin defaults — confirm.
    test_seq2seq = False

    def setUp(self):
        """Write a tiny BPE vocab/merges pair into the mixin's temp dir."""
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        """Return a slow tokenizer built from the temp vocab files."""
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """Return a fast tokenizer built from the temp vocab files."""
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """BPE tokenization and token->id conversion on the toy vocab."""
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @require_ftfy
    def test_check_encoding_slow_fast(self):
        """The ftfy-based slow tokenizer and the fast tokenizer must agree."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        """Offsets must track a leading space correctly."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f"{text_of_1_token} {text_of_1_token}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

    def test_log_warning(self):
        """Loading a legacy checkpoint with a mismatched backend must raise."""
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower-cases letters, so the mixin's default check does not apply.
        pass
| 279 |
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> List[str]:
"""simple docstring"""
print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' )
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if dist[i][j] != float('''inf''' ):
print(int(dist[i][j] ) , end='''\t''' )
else:
print('''INF''' , end='''\t''' )
print()
def floyd_warshall(graph, v):
    """All-pairs shortest paths via the Floyd-Warshall algorithm.

    :param graph: ``v`` x ``v`` adjacency matrix; ``graph[i][j]`` is the edge
        weight from ``i`` to ``j``, ``float('inf')`` when no edge exists.
    :param v: number of vertices.
    :return: tuple ``(dist, v)`` where ``dist[i][j]`` is the shortest distance.

    Runs in O(v^3) time and O(v^2) extra space. Also prints the resulting
    matrix via ``_print_dist``. The original definition had duplicate mangled
    parameter names (SyntaxError) and a name that did not match the call site.
    """
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    # Start from the direct edge weights.
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    # Interactive driver: read a graph from stdin and run Floyd-Warshall.
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    # Distance from a vertex to itself is zero.
    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)

# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2

# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]

# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2

# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1

# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 279 | 1 |
import argparse
import hashlib
import io
import os
import urllib
import urllib.request
import warnings

import torch
from torch import nn
from tqdm import tqdm

from transformers import WhisperConfig, WhisperForConditionalGeneration
lowerCAmelCase_ = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def remove_ignore_keys_(state_dict):
    """Drop top-level keys that should not be ported, mutating ``state_dict`` in place.

    Missing keys are ignored (``dict.pop`` with a ``None`` default). The
    mangled original popped the wrong objects and referenced an undefined name.
    """
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
# Substring replacements mapping OpenAI Whisper state-dict key fragments to the
# Hugging Face naming scheme. Renamed from the mangled `lowerCAmelCase_`:
# `rename_keys` iterates this as `WHISPER_MAPPING.items()`.
WHISPER_MAPPING = {
    "blocks": "layers",
    "mlp.0": "fc1",
    "mlp.2": "fc2",
    "mlp_ln": "final_layer_norm",
    ".attn.query": ".self_attn.q_proj",
    ".attn.key": ".self_attn.k_proj",
    ".attn.value": ".self_attn.v_proj",
    ".attn_ln": ".self_attn_layer_norm",
    ".attn.out": ".self_attn.out_proj",
    ".cross_attn.query": ".encoder_attn.q_proj",
    ".cross_attn.key": ".encoder_attn.k_proj",
    ".cross_attn.value": ".encoder_attn.v_proj",
    ".cross_attn_ln": ".encoder_attn_layer_norm",
    ".cross_attn.out": ".encoder_attn.out_proj",
    "decoder.ln.": "decoder.layer_norm.",
    "encoder.ln.": "encoder.layer_norm.",
    "token_embedding": "embed_tokens",
    "encoder.positional_embedding": "encoder.embed_positions.weight",
    "decoder.positional_embedding": "decoder.embed_positions.weight",
    "ln_post": "layer_norm",
}
def rename_keys(s_dict):
    """Rename Whisper state-dict keys to the HF scheme in place and return the dict.

    Each key is rewritten by applying every matching substring replacement from
    ``WHISPER_MAPPING``; each rename is logged via ``print``. The mangled
    original referenced undefined names (`s_dict`, `new_key`).
    """
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    """Build a bias-free ``nn.Linear`` sharing the weights of an embedding.

    Used to tie the LM projection head to the decoder token embeddings. The
    mangled original had duplicate parameter names (SyntaxError) and referenced
    undefined ``emb``/``lin_layer``.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Share the underlying tensor so the weights stay tied.
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> bytes:
"""simple docstring"""
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
snake_case_ : List[Any] = os.path.basename(_UpperCamelCase )
snake_case_ : Any = url.split('''/''' )[-2]
snake_case_ : str = os.path.join(_UpperCamelCase , _UpperCamelCase )
if os.path.exists(_UpperCamelCase ) and not os.path.isfile(_UpperCamelCase ):
raise RuntimeError(f'''{download_target} exists and is not a regular file''' )
if os.path.isfile(_UpperCamelCase ):
snake_case_ : Union[str, Any] = open(_UpperCamelCase , '''rb''' ).read()
if hashlib.shaaaa(_UpperCamelCase ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(f'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''' )
with urllib.request.urlopen(_UpperCamelCase ) as source, open(_UpperCamelCase , '''wb''' ) as output:
with tqdm(
total=int(source.info().get('''Content-Length''' ) ) , ncols=80 , unit='''iB''' , unit_scale=_UpperCamelCase , unit_divisor=1_024 ) as loop:
while True:
snake_case_ : Dict = source.read(8_192 )
if not buffer:
break
output.write(_UpperCamelCase )
loop.update(len(_UpperCamelCase ) )
snake_case_ : Any = open(_UpperCamelCase , '''rb''' ).read()
if hashlib.shaaaa(_UpperCamelCase ).hexdigest() != expected_shaaaa:
raise RuntimeError(
'''Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.''' )
return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Convert an OpenAI Whisper checkpoint into a HF ``WhisperForConditionalGeneration``.

    ``checkpoint_path`` is either a model-size key of ``_MODELS`` (downloaded
    first) or a path to a local ``.pt`` file. The converted model is written to
    ``pytorch_dump_folder_path`` via ``save_pretrained``.
    """
    if ".pt" not in checkpoint_path:
        # `checkpoint_path` is a model name: fetch the official checkpoint bytes
        # and deserialize them (the original indexed raw bytes directly).
        model_bytes = _download(_MODELS[checkpoint_path])
        original_checkpoint = torch.load(io.BytesIO(model_bytes), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    # Encoder and decoder FFNs share the same width in OpenAI checkpoints.
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        # NOTE(review): the source used `n_text_state` (hidden size) here;
        # `n_text_head` matches the other head-count fields — confirm.
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        # Tie the LM head to the decoder token embeddings.
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: convert a checkpoint path/name into a HF model directory.
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 279 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    """Dummy model whose ``forward`` lists the tokenizer inputs contiguously.

    ``ensure_valid_input`` inspects ``forward``'s parameter names, so the
    signature below is the fixture's whole point (see test_ensure_valid_input).
    """

    def forward(self, input_ids, token_type_ids, attention_mask):
        return None
class FuncNonContiguousArgs:
    """Dummy model whose ``forward`` interleaves a non-tokenizer argument.

    ``some_other_args`` sits between the tokenizer inputs, so
    ``ensure_valid_input`` must stop after ``input_ids`` (see
    test_ensure_valid_input).
    """

    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class OnnxExportTestCase(unittest.TestCase):
    """Tests for ``transformers.convert_graph_to_onnx`` helpers.

    Names restored from use sites: the class is self-referenced as
    ``OnnxExportTestCase.MODEL_TO_TEST`` and every method in the mangled
    original shared one name, so only the last survived.
    """

    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        """Export ``model`` to ONNX into a fresh temp location and return the output path."""
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        valid_input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, valid_input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(valid_input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, valid_input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
| 279 | 1 |
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


# The UnCLIP pipelines need both PyTorch and transformers >= 4.25.0. When
# either is missing, import dummy placeholder classes instead (they raise an
# informative error on use) so this package stays importable.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    # Real implementations: only imported when the optional deps are present.
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 279 |
# Conversion factors to joules, keyed by unit name. Renamed from the mangled
# `lowerCAmelCase_`: the converter below reads it as `ENERGY_CONVERSION`.
ENERGY_CONVERSION: dict = {
    "joule": 1.0,
    "kilojoule": 1000,
    "megajoule": 1000000,
    "gigajoule": 1000000000,
    "wattsecond": 1.0,
    "watthour": 3600,
    "kilowatthour": 3600000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4186.8,
    "kilocalorie_nutr": 4186800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1055.05585,
    "footpound": 1.355818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert ``value`` between energy units via joules.

    Raises ValueError when either unit name is unknown. The mangled original
    had two identically named parameters (a SyntaxError) and joined the wrong
    object into the error message.

    >>> energy_conversion("joule", "joule", 1)
    1.0
    >>> energy_conversion("joule", "kilojoule", 1)
    0.001
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
    import doctest

    # Run the module's doctests (e.g. the examples in `energy_conversion`)
    # when executed as a script.
    doctest.testmod()
| 279 | 1 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    """Builds tiny ALBERT configs/inputs for the Flax model tests.

    The class name is restored from its use site (``FlaxAlbertModelTester(self)``
    in the model test's ``setUp``); parameter names are restored from the
    attribute assignments they feed.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return a tiny AlbertConfig plus random input tensors."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Repack the inputs as the kwargs dict the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Common Flax model tests applied to every ALBERT head class.

    The mangled original's base class `_a` is restored to the imported
    FlaxModelTesterMixin; the tuple below also listed
    FlaxAlbertForQuestionAnswering twice — the duplicate is dropped.
    """

    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """Every head class must load from the hub and run a trivial forward pass."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    """Slow integration check of FlaxAlbertModel against known activations."""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        # Reference slice computed once from the released checkpoint.
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 279 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
# Module-level logger; `_download_and_prepare` below calls `logger.warning`.
logger = datasets.logging.get_logger(__name__)
lowerCAmelCase_ = '''\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
'''
lowerCAmelCase_ = '''\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project\'s README at https://github.com/google-research/bleurt#readme for more information.
'''
lowerCAmelCase_ = '''
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
\'scores\': List of scores.
Examples:
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> bleurt = datasets.load_metric("bleurt")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results["scores"]])
[1.03, 1.04]
'''
lowerCAmelCase_ = {
'''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
'''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
'''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
'''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
'''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
'''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
'''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
'''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
'''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
'''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}
# BLEURT metric wrapper for the `datasets` library (class name obfuscated).
# NOTE(review): all three methods below share the single name `lowerCamelCase`,
# so only the LAST definition survives on the class. Presumably they were
# `_info`, `_download_and_prepare` and `_compute` — the names the
# `datasets.Metric` framework actually calls. Confirm against upstream.
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
    def lowerCamelCase (self ) -> Optional[int]:
        '''Return the metric's MetricInfo: string prediction/reference features,
        citation/homepage and reference URLs.'''
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
            {
                '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                '''references''': datasets.Value('''string''' , id='''sequence''' ),
            } ) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )
    def lowerCamelCase (self , __magic_name__ ) -> List[Any]:
        '''Resolve ``self.config_name`` to a checkpoint, download it with the
        given download manager and build the BLEURT scorer.

        NOTE(review): the branch results are assigned to the throwaway name
        `snake_case_`, but the download line reads `checkpoint_name`, which is
        never bound — as written this raises NameError. Likewise the scorer is
        not stored on `self`, yet `self.scorer` is read in the method below.
        Both look like renaming artifacts; `score` is presumably the module
        from `from bleurt import score` (import not visible here).
        '''
        if self.config_name == "default":
            logger.warning(
                '''Using default BLEURT-Base checkpoint for sequence maximum length 128. '''
                '''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''' )
            snake_case_ : Dict = '''bleurt-base-128'''
        if self.config_name.lower() in CHECKPOINT_URLS:
            snake_case_ : Optional[int] = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            snake_case_ : Union[str, Any] = self.config_name.upper()
        else:
            raise KeyError(
                F'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' )
        # download the model checkpoint specified by self.config_name and set up the scorer
        # NOTE(review): `os.path.join` is given the same argument twice below —
        # presumably the extracted directory plus the checkpoint name.
        snake_case_ : Any = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        snake_case_ : Dict = score.BleurtScorer(os.path.join(__magic_name__ , __magic_name__ ) )
    def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> Union[str, Any]:
        '''Score candidate sentences against references with the BLEURT scorer.

        NOTE(review): the result is assigned to `snake_case_` but `scores` is
        returned — unbound as written (renaming artifact).
        '''
        snake_case_ : Dict = self.scorer.score(references=__magic_name__ , candidates=__magic_name__ )
        return {"scores": scores}
| 279 | 1 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def lowerCamelCase_(*args, take_from=None, standard_warn=True, stacklevel=2) -> Any:
    """Emit deprecation warnings and pop deprecated values (diffusers-style).

    Each positional argument is a ``(attribute, version_name, message)`` tuple
    (a single tuple may also be passed unwrapped). For every tuple:

    * raise ``ValueError`` if the library version already reached
      ``version_name`` (the deprecation shim should have been deleted);
    * otherwise pop ``attribute`` from ``take_from`` (a kwargs dict) or read it
      from ``take_from`` (an object), collecting the value, and emit a
      ``FutureWarning`` built from ``message``.

    Args:
        *args: one or more ``(attribute, version_name, message)`` tuples.
        take_from: kwargs dict or object holding the deprecated values; ``None``
            for a plain deprecation notice.
        standard_warn: prefix ``message`` with the standard sentence when True.
        stacklevel: forwarded to ``warnings.warn`` so the warning points at the
            caller.

    Returns:
        ``None`` if nothing was collected, the single value, or a tuple of
        values.

    Raises:
        ValueError: if the deprecation expired for the current version.
        TypeError: if ``take_from`` is a dict with leftover (unknown) keys.

    Fixes vs. the previous revision: the signature declared four parameters all
    named ``_UpperCamelCase`` (a SyntaxError), and the body assigned every
    intermediate to ``snake_case_`` while reading ``deprecated_kwargs``,
    ``values``, ``warning`` etc. (NameErrors). The TypeError message also
    interpolated the literal "(unknown)" instead of the caller's filename.
    """
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        # single deprecation passed as three bare positionals
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )
        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."
        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)
    # Anything left in a kwargs dict was not a known deprecation -> caller bug.
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, _value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")
    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
| 279 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
# Lookup table: interpolation name -> PIL resampling filter (presumably the
# module's `PIL_INTERPOLATION`; the obfuscated name `lowerCAmelCase_` is reused
# by other assignments in this file — NOTE(review)).
# Pillow 9.1.0 moved the resampling constants into the `Image.Resampling` enum
# and deprecated the bare `Image.*` aliases, hence the version gate.
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
    lowerCAmelCase_ = {
        '''linear''': PIL.Image.Resampling.BILINEAR,
        '''bilinear''': PIL.Image.Resampling.BILINEAR,
        '''bicubic''': PIL.Image.Resampling.BICUBIC,
        '''lanczos''': PIL.Image.Resampling.LANCZOS,
        '''nearest''': PIL.Image.Resampling.NEAREST,
    }
else:
    # Pillow < 9.1.0: legacy module-level constants (LINEAR is an alias of BILINEAR).
    lowerCAmelCase_ = {
        '''linear''': PIL.Image.LINEAR,
        '''bilinear''': PIL.Image.BILINEAR,
        '''bicubic''': PIL.Image.BICUBIC,
        '''lanczos''': PIL.Image.LANCZOS,
        '''nearest''': PIL.Image.NEAREST,
    }
def lowerCamelCase_(_UpperCamelCase) -> list:
    """Convert a batch of [-1, 1]-ranged torch image tensors to PIL images.

    Args:
        _UpperCamelCase: image tensor — assumed 4-D ``(batch, channel, H, W)``
            with values in roughly [-1, 1] (TODO confirm with callers).

    Returns:
        List of ``PIL.Image.Image``, one per batch element.

    Fixes vs. the previous revision: the body read the undefined name `images`
    instead of its parameter, and called `numpy_to_pil`, a name that does not
    exist in this module (the numpy->PIL helper below is bound to a different
    name) — both NameErrors. The numpy->PIL step is now done inline.
    """
    # [-1, 1] -> [0, 1], then (B, C, H, W) float tensor -> (B, H, W, C) numpy
    images = (_UpperCamelCase / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # single-channel -> grayscale "L" PIL images
        pil_images = [Image.fromarray(img.squeeze(), mode="L") for img in images]
    else:
        pil_images = [Image.fromarray(img) for img in images]
    return pil_images
def lowerCamelCase_(_UpperCamelCase) -> list:
    """Convert a numpy image array to a list of PIL images.

    Args:
        _UpperCamelCase: ``uint8``-convertible array scaled to [0, 1], either a
            single image ``(H, W, C)`` or a batch ``(N, H, W, C)``.

    Returns:
        List of ``PIL.Image.Image`` (grayscale "L" mode when ``C == 1``).

    Fixes vs. the previous revision: intermediates were assigned to the
    throwaway name `snake_case_` while later statements read `images`, and the
    return statement read the never-bound `pil_images` — NameErrors as written.
    """
    images = _UpperCamelCase
    if images.ndim == 3:
        # promote a single image to a one-element batch
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
| 279 | 1 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
# NOTE(review): all seven test tables below are bound to the SAME obfuscated
# name `lowerCAmelCase_` (each overwrites the previous), while the pytest
# parametrizations further down read names like `SORTED_HANDS` that are never
# defined. Presumably the originals were SORTED_HANDS, TEST_COMPARE,
# TEST_FLUSH, TEST_STRAIGHT, TEST_FIVE_HIGH_STRAIGHT, TEST_KIND and
# TEST_TYPES — confirm against the upstream project-euler-54 test module.
# Hands in ascending strength order (inline comments label the hand class).
lowerCAmelCase_ = (
    '''4S 3H 2C 7S 5H''',
    '''9D 8H 2C 6S 7H''',
    '''2D 6D 9D TH 7D''',
    '''TC 8C 2S JH 6C''',
    '''JH 8S TH AH QH''',
    '''TS KS 5S 9S AC''',
    '''KD 6S 9D TH AD''',
    '''KS 8D 4D 9S 4S''', # pair
    '''8C 4S KH JS 4D''', # pair
    '''QH 8H KD JH 8S''', # pair
    '''KC 4H KS 2H 8D''', # pair
    '''KD 4S KC 3H 8S''', # pair
    '''AH 8S AS KC JH''', # pair
    '''3H 4C 4H 3S 2H''', # 2 pairs
    '''5S 5D 2C KH KH''', # 2 pairs
    '''3C KH 5D 5S KH''', # 2 pairs
    '''AS 3C KH AD KH''', # 2 pairs
    '''7C 7S 3S 7H 5S''', # 3 of a kind
    '''7C 7S KH 2H 7H''', # 3 of a kind
    '''AC KH QH AH AS''', # 3 of a kind
    '''2H 4D 3C AS 5S''', # straight (low ace)
    '''3C 5C 4C 2C 6H''', # straight
    '''6S 8S 7S 5H 9H''', # straight
    '''JS QS 9H TS KH''', # straight
    '''QC KH TS JS AH''', # straight (high ace)
    '''8C 9C 5C 3C TC''', # flush
    '''3S 8S 9S 5S KS''', # flush
    '''4C 5C 9C 8C KC''', # flush
    '''JH 8H AH KH QH''', # flush
    '''3D 2H 3H 2C 2D''', # full house
    '''2H 2C 3S 3H 3D''', # full house
    '''KH KC 3S 3H 3D''', # full house
    '''JC 6H JS JD JH''', # 4 of a kind
    '''JC 7H JS JD JH''', # 4 of a kind
    '''JC KH JS JD JH''', # 4 of a kind
    '''2S AS 4S 5S 3S''', # straight flush (low ace)
    '''2D 6D 3D 4D 5D''', # straight flush
    '''5C 6C 3C 7C 4C''', # straight flush
    '''JH 9H TH KH QH''', # straight flush
    '''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
# (hand, other, expected outcome for `hand.compare_with(other)`) cases.
lowerCAmelCase_ = (
    ('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
    ('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
    ('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
    ('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
    ('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
    ('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
    ('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
    ('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
    ('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
    ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
    ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
    ('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
    ('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
    ('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
    ('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
    ('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
    ('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
    ('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
    ('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
    ('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
    ('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
    ('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
    ('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
    ('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
    ('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
    ('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
    ('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
    ('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
    ('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
    ('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
    ('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
    ('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
    ('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
# (hand, expected) pairs — presumably for `_is_flush`.
lowerCAmelCase_ = (
    ('''2H 3H 4H 5H 6H''', True),
    ('''AS AH 2H AD AC''', False),
    ('''2H 3H 5H 6H 7H''', True),
    ('''KS AS TS QS JS''', True),
    ('''8H 9H QS JS TH''', False),
    ('''AS 3S 4S 8S 2S''', True),
)
# (hand, expected) pairs — presumably for `_is_straight`.
lowerCAmelCase_ = (
    ('''2H 3H 4H 5H 6H''', True),
    ('''AS AH 2H AD AC''', False),
    ('''2H 3H 5H 6H 7H''', False),
    ('''KS AS TS QS JS''', True),
    ('''8H 9H QS JS TH''', True),
)
# (hand, expected flag, expected card values) — presumably for
# `_is_five_high_straight` (ace counted as 14 in `_card_values`).
lowerCAmelCase_ = (
    ('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]),
    ('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]),
    ('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]),
    ('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
# (hand, expected) pairs — presumably for `_is_same_kind`.
lowerCAmelCase_ = (
    ('''JH AH TH KH QH''', 0),
    ('''JH 9H TH KH QH''', 0),
    ('''JC KH JS JD JH''', 7),
    ('''KH KC 3S 3H 3D''', 6),
    ('''8C 9C 5C 3C TC''', 0),
    ('''JS QS 9H TS KH''', 0),
    ('''7C 7S KH 2H 7H''', 3),
    ('''3C KH 5D 5S KH''', 2),
    ('''QH 8H KD JH 8S''', 1),
    ('''2D 6D 9D TH 7D''', 0),
)
# (hand, expected hand-type rank) pairs — presumably for `_hand_type`.
lowerCAmelCase_ = (
    ('''JH AH TH KH QH''', 2_3),
    ('''JH 9H TH KH QH''', 2_2),
    ('''JC KH JS JD JH''', 2_1),
    ('''KH KC 3S 3H 3D''', 2_0),
    ('''8C 9C 5C 3C TC''', 1_9),
    ('''JS QS 9H TS KH''', 1_8),
    ('''7C 7S KH 2H 7H''', 1_7),
    ('''3C KH 5D 5S KH''', 1_6),
    ('''QH 8H KD JH 8S''', 1_5),
    ('''2D 6D 9D TH 7D''', 1_4),
)
# NOTE(review): both helpers below are named `lowerCamelCase_` (renaming
# artifact), so the second definition shadows the first, and the generator
# expression calls `generate_random_hand`, which is not bound anywhere.
# Module-scope reads of the parameter-style name `_UpperCamelCase` and of
# `play`/`oppo`/`hand`/`other`/`expected` are likewise unresolved as written.
def lowerCamelCase_ ( ) -> Dict:
    """Pick two random hands from the sorted table and return
    (hand, other, expected compare_with outcome)."""
    snake_case_ , snake_case_ : Any = randrange(len(_UpperCamelCase ) ), randrange(len(_UpperCamelCase ) )
    # Index comparison works because SORTED_HANDS is in ascending strength order.
    snake_case_ : Any = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)]
    snake_case_ , snake_case_ : Tuple = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def lowerCamelCase_ ( _UpperCamelCase = 100 ) -> str:
    """Yield the given number of random (hand, other, expected) cases."""
    return (generate_random_hand() for _ in range(_UpperCamelCase ))
# NOTE(review): the seven pytest wrappers below are all named `lowerCamelCase_`
# (renaming artifact), so each definition overwrites the previous one at import
# time and only the last would be collected. The parametrize argvalues
# `_UpperCamelCase` and `generate_random_hands()` are unresolved names, and the
# bodies read `expected`/`player`/`card_values`, which the renamed parameters
# no longer bind. Presumably these were distinct `test_*` functions
# parametrized over the constant tables above.
@pytest.mark.parametrize('''hand, expected''' , _UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> int:
    """Check PokerHand._is_flush against the expected flag."""
    assert PokerHand(_UpperCamelCase )._is_flush() == expected
@pytest.mark.parametrize('''hand, expected''' , _UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
    """Check PokerHand._is_straight against the expected flag."""
    assert PokerHand(_UpperCamelCase )._is_straight() == expected
@pytest.mark.parametrize('''hand, expected, card_values''' , _UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Tuple:
    """Check the five-high-straight flag and the resulting card values."""
    snake_case_ : str = PokerHand(_UpperCamelCase )
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize('''hand, expected''' , _UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> int:
    """Check PokerHand._is_same_kind against the expected value."""
    assert PokerHand(_UpperCamelCase )._is_same_kind() == expected
@pytest.mark.parametrize('''hand, expected''' , _UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
    """Check PokerHand._hand_type against the expected rank."""
    assert PokerHand(_UpperCamelCase )._hand_type == expected
@pytest.mark.parametrize('''hand, other, expected''' , _UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
    """Check compare_with on the hand-vs-hand fixture table."""
    assert PokerHand(_UpperCamelCase ).compare_with(PokerHand(_UpperCamelCase ) ) == expected
@pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]:
    """Check compare_with on randomly generated hand pairs."""
    assert PokerHand(_UpperCamelCase ).compare_with(PokerHand(_UpperCamelCase ) ) == expected
# NOTE(review): as elsewhere in this file, the three tests below all share the
# name `lowerCamelCase_`, and their locals are assigned to `snake_case_` while
# later statements read the original names (`poker_hands`, `pokerhands`,
# `pokerhand`, `expected`, `expected_card_values`) — unresolved as written.
def lowerCamelCase_ ( ) -> str:
    """Shuffling then sorting PokerHands must reproduce SORTED_HANDS order."""
    snake_case_ : Dict = [PokerHand(_UpperCamelCase ) for hand in SORTED_HANDS]
    snake_case_ : str = poker_hands.copy()
    shuffle(_UpperCamelCase )
    snake_case_ : List[str] = chain(sorted(_UpperCamelCase ) )
    for index, hand in enumerate(_UpperCamelCase ):
        assert hand == poker_hands[index]
def lowerCamelCase_ ( ) -> Dict:
    """Reverse-sorting puts the stronger of two hands first."""
    snake_case_ : Union[str, Any] = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )]
    pokerhands.sort(reverse=_UpperCamelCase )
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def lowerCamelCase_ ( ) -> str:
    """_is_five_high_straight must be idempotent: repeated calls keep the
    same flag and card values."""
    snake_case_ : Dict = PokerHand('''2C 4S AS 3D 5C''' )
    snake_case_ : str = True
    snake_case_ : Tuple = [5, 4, 3, 2, 14]
    for _ in range(10 ):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def lowerCamelCase_ ( ) -> List[str]:
    """Project Euler 54: play every line of poker_hands.txt (player hand in
    columns 0-13, opponent in 15+) and count player wins; the known answer
    is 376.

    NOTE(review): obfuscation artifacts — locals are assigned to `snake_case_`
    while later statements read `answer`/`player`/`output`, and
    `os.path.dirname(_UpperCamelCase )` should presumably be
    `os.path.dirname(__file__)`; unresolved names as written.
    """
    snake_case_ : List[str] = 0
    snake_case_ : Union[str, Any] = os.path.abspath(os.path.dirname(_UpperCamelCase ) )
    snake_case_ : Dict = os.path.join(_UpperCamelCase , '''poker_hands.txt''' )
    with open(_UpperCamelCase ) as file_hand:
        for line in file_hand:
            # first 14 chars = player's hand, from char 15 = opponent's hand
            snake_case_ : Dict = line[:14].strip()
            snake_case_ : List[str] = line[15:].strip()
            snake_case_ , snake_case_ : str = PokerHand(_UpperCamelCase ), PokerHand(_UpperCamelCase )
            snake_case_ : int = player.compare_with(_UpperCamelCase )
            if output == "Win":
                answer += 1
    assert answer == 376
| 279 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
# BioGptTokenizer unit tests (obfuscated; presumably `BioGptTokenizationTest`).
# NOTE(review): the base class `_a` is undefined here (presumably
# `TokenizerTesterMixin`), the two class attributes share one name so the
# second overwrites the first (presumably `tokenizer_class` /
# `test_rust_tokenizer`), and several methods share the name `lowerCamelCase`
# (presumably `setUp`, `get_input_output_texts`, `test_full_tokenizer`,
# `test_sequence_builders`), so only the last survives. Locals assigned to
# `snake_case_` are later read under their original names
# (`self.vocab_file`, `tokens`, `text`, ...) — unresolved as written.
class __lowerCAmelCase ( _a, unittest.TestCase ):
    lowerCamelCase_ : Any = BioGptTokenizer
    lowerCamelCase_ : Optional[Any] = False
    def lowerCamelCase (self ) -> List[Any]:
        '''Write a tiny BPE vocab + merges file pair into the temp dir.'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        snake_case_ : Optional[Any] = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''w</w>''',
            '''r</w>''',
            '''t</w>''',
            '''lo''',
            '''low''',
            '''er</w>''',
            '''low</w>''',
            '''lowest</w>''',
            '''newer</w>''',
            '''wider</w>''',
            '''<unk>''',
        ]
        snake_case_ : Union[str, Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
        snake_case_ : Union[str, Any] = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
        snake_case_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        snake_case_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' ) as fp:
            fp.write(json.dumps(__magic_name__ ) )
        with open(self.merges_file , '''w''' ) as fp:
            fp.write('''\n'''.join(__magic_name__ ) )
    def lowerCamelCase (self , __magic_name__ ) -> int:
        '''Return a (input_text, output_text) pair for tokenizer round-trips.'''
        snake_case_ : str = '''lower newer'''
        snake_case_ : Dict = '''lower newer'''
        return input_text, output_text
    def lowerCamelCase (self ) -> Union[str, Any]:
        '''Tokenize with the tiny vocab and check tokens and their ids.'''
        snake_case_ : Optional[int] = BioGptTokenizer(self.vocab_file , self.merges_file )
        snake_case_ : Union[str, Any] = '''lower'''
        snake_case_ : Optional[int] = ['''low''', '''er</w>''']
        snake_case_ : Any = tokenizer.tokenize(__magic_name__ )
        self.assertListEqual(__magic_name__ , __magic_name__ )
        snake_case_ : Optional[int] = tokens + ['''<unk>''']
        snake_case_ : List[str] = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
    @slow
    def lowerCamelCase (self ) -> Optional[Any]:
        '''With the released checkpoint, special-token building prepends [2]
        to a single sequence and joins pairs as [2] + a + [2] + b.'''
        snake_case_ : Optional[int] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
        snake_case_ : List[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__magic_name__ )
        snake_case_ : str = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__magic_name__ )
        snake_case_ : str = tokenizer.build_inputs_with_special_tokens(__magic_name__ )
        snake_case_ : List[str] = tokenizer.build_inputs_with_special_tokens(__magic_name__ , __magic_name__ )
        self.assertTrue(encoded_sentence == [2] + text )
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 279 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
# Model tester for TF Flaubert (obfuscated; presumably `TFFlaubertModelTester`).
# NOTE(review): every method except `__init__` shares the name `lowerCamelCase`,
# so only the last definition survives on the class, while the companion test
# case calls `prepare_config_and_inputs` / `create_and_check_flaubert_*` —
# never bound under those names here. The constructor assigns its config
# values to the throwaway name `snake_case_` (presumably `self.batch_size`,
# `self.seq_length`, ... attribute assignments), so the instance stores
# nothing as written. The final tuple-unpack annotated with `: int` is a
# SyntaxError (annotated assignments cannot have tuple targets). Confirm all
# of this against the upstream tests/models/flaubert module.
class __lowerCAmelCase :
    # Store the hyperparameters a Flaubert test config needs (sizes, dropout,
    # language/label counts, summary type, ...).
    def __init__(self , __magic_name__ , ) -> Union[str, Any]:
        '''simple docstring'''
        snake_case_ : Tuple = parent
        snake_case_ : Tuple = 13
        snake_case_ : List[str] = 7
        snake_case_ : Union[str, Any] = True
        snake_case_ : Tuple = True
        snake_case_ : Union[str, Any] = True
        snake_case_ : Union[str, Any] = True
        snake_case_ : Dict = True
        snake_case_ : Optional[int] = False
        snake_case_ : str = False
        snake_case_ : Tuple = False
        snake_case_ : Optional[int] = 2
        snake_case_ : Union[str, Any] = 99
        snake_case_ : Any = 0
        snake_case_ : List[Any] = 32
        snake_case_ : Tuple = 2
        snake_case_ : Optional[int] = 4
        snake_case_ : Optional[Any] = 0.1
        snake_case_ : Tuple = 0.1
        snake_case_ : Optional[int] = 512
        snake_case_ : Any = 16
        snake_case_ : Union[str, Any] = 2
        snake_case_ : Dict = 0.02
        snake_case_ : List[Any] = 3
        snake_case_ : int = 4
        snake_case_ : int = '''last'''
        snake_case_ : Optional[int] = True
        snake_case_ : List[str] = None
        snake_case_ : str = 0
    # Build a random batch of inputs plus a FlaubertConfig.
    # NOTE(review): `tf.floataa` is an obfuscation of `tf.float32` (there is no
    # `floataa` attribute); same for `tf.intaa` elsewhere.
    def lowerCamelCase (self ) -> str:
        '''simple docstring'''
        snake_case_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        snake_case_ : str = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
        snake_case_ : Optional[int] = None
        if self.use_input_lengths:
            snake_case_ : Any = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            ) # small variation of seq_length
        snake_case_ : Any = None
        if self.use_token_type_ids:
            snake_case_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        snake_case_ : Union[str, Any] = None
        snake_case_ : Optional[int] = None
        snake_case_ : Dict = None
        if self.use_labels:
            snake_case_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            snake_case_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            snake_case_ : Dict = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
            snake_case_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
        snake_case_ : List[Any] = FlaubertConfig(
            vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    # Build and run the base TF model; check the hidden-state shape.
    def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> Dict:
        '''simple docstring'''
        snake_case_ : Union[str, Any] = TFFlaubertModel(config=__magic_name__ )
        snake_case_ : str = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
        snake_case_ : Any = model(__magic_name__ )
        snake_case_ : str = [input_ids, input_mask]
        snake_case_ : str = model(__magic_name__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    # LM-head variant; check logits over the vocabulary.
    def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> List[str]:
        '''simple docstring'''
        snake_case_ : Any = TFFlaubertWithLMHeadModel(__magic_name__ )
        snake_case_ : int = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
        snake_case_ : Optional[int] = model(__magic_name__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    # Extractive-QA variant; check start/end logit shapes.
    def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> Optional[Any]:
        '''simple docstring'''
        snake_case_ : List[str] = TFFlaubertForQuestionAnsweringSimple(__magic_name__ )
        snake_case_ : int = {'''input_ids''': input_ids, '''lengths''': input_lengths}
        snake_case_ : str = model(__magic_name__ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    # Sequence-classification variant; logits per label class.
    def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> List[str]:
        '''simple docstring'''
        snake_case_ : Dict = TFFlaubertForSequenceClassification(__magic_name__ )
        snake_case_ : Union[str, Any] = {'''input_ids''': input_ids, '''lengths''': input_lengths}
        snake_case_ : str = model(__magic_name__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    # Token-classification variant; logits per token per label.
    def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> int:
        '''simple docstring'''
        snake_case_ : Any = self.num_labels
        snake_case_ : List[str] = TFFlaubertForTokenClassification(config=__magic_name__ )
        snake_case_ : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        snake_case_ : Any = model(__magic_name__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    # Multiple-choice variant: inputs are tiled over the choice dimension.
    def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> List[str]:
        '''simple docstring'''
        snake_case_ : Union[str, Any] = self.num_choices
        snake_case_ : Tuple = TFFlaubertForMultipleChoice(config=__magic_name__ )
        snake_case_ : List[Any] = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
        snake_case_ : Any = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
        snake_case_ : List[Any] = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
        snake_case_ : Dict = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        snake_case_ : int = model(__magic_name__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    # Repackage prepare_config_and_inputs output as the (config, inputs_dict)
    # pair used by the common model tests.
    def lowerCamelCase (self ) -> Tuple:
        '''simple docstring'''
        snake_case_ : List[str] = self.prepare_config_and_inputs()
        (
            (
                snake_case_
            ) , (
                snake_case_
            ) , (
                snake_case_
            ) , (
                snake_case_
            ) , (
                snake_case_
            ) , (
                snake_case_
            ) , (
                snake_case_
            ) , (
                snake_case_
            ) , (
                snake_case_
            ) ,
        ) : int = config_and_inputs
        snake_case_ : Union[str, Any] = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''langs''': token_type_ids,
            '''lengths''': input_lengths,
        }
        return config, inputs_dict
# TF Flaubert model test case (obfuscated; presumably `TFFlaubertModelTest`).
# NOTE(review): the bases `_a, _a` are undefined here (presumably
# `TFModelTesterMixin, PipelineTesterMixin`); the five class attributes all
# share the name `lowerCamelCase_` (presumably `all_model_classes`,
# `all_generative_model_classes`, `pipeline_model_mapping`, plus two test
# flags), so only the last assignment survives; and every method shares the
# name `lowerCamelCase`, so only the last definition is bound. Method bodies
# also read names (`pipeline_test_casse_name`, `tokenizer_name`,
# `config_and_inputs`, `model`) that their renamed parameters/locals no longer
# bind. Confirm against the upstream Flaubert TF test module.
@require_tf
class __lowerCAmelCase ( _a, _a, unittest.TestCase ):
    lowerCamelCase_ : Any = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    lowerCamelCase_ : Dict = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    ) # TODO (PVP): Check other models whether language generation is also applicable
    lowerCamelCase_ : Any = (
        {
            '''feature-extraction''': TFFlaubertModel,
            '''fill-mask''': TFFlaubertWithLMHeadModel,
            '''question-answering''': TFFlaubertForQuestionAnsweringSimple,
            '''text-classification''': TFFlaubertForSequenceClassification,
            '''token-classification''': TFFlaubertForTokenClassification,
            '''zero-shot''': TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    lowerCamelCase_ : int = False
    lowerCamelCase_ : List[Any] = False
    # Skip QA pipeline tests when a slow tokenizer would be used.
    def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Union[str, Any]:
        '''simple docstring'''
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith('''Fast''' )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False
    # setUp: build the model tester and a config tester.
    def lowerCamelCase (self ) -> str:
        '''simple docstring'''
        snake_case_ : int = TFFlaubertModelTester(self )
        snake_case_ : Optional[Any] = ConfigTester(self , config_class=__magic_name__ , emb_dim=37 )
    def lowerCamelCase (self ) -> Optional[int]:
        '''simple docstring'''
        self.config_tester.run_common_tests()
    # One thin wrapper per create_and_check_* scenario on the tester.
    def lowerCamelCase (self ) -> str:
        '''simple docstring'''
        snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*__magic_name__ )
    def lowerCamelCase (self ) -> List[Any]:
        '''simple docstring'''
        snake_case_ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*__magic_name__ )
    def lowerCamelCase (self ) -> Dict:
        '''simple docstring'''
        snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*__magic_name__ )
    def lowerCamelCase (self ) -> Tuple:
        '''simple docstring'''
        snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*__magic_name__ )
    def lowerCamelCase (self ) -> Tuple:
        '''simple docstring'''
        snake_case_ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*__magic_name__ )
    def lowerCamelCase (self ) -> Tuple:
        '''simple docstring'''
        snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*__magic_name__ )
    # Smoke-test loading each released checkpoint.
    @slow
    def lowerCamelCase (self ) -> Any:
        '''simple docstring'''
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case_ : List[Any] = TFFlaubertModel.from_pretrained(__magic_name__ )
            self.assertIsNotNone(__magic_name__ )
# Integration test: run the released small Flaubert checkpoint on a fixed
# sentence and compare a 3x3 slice of the output against golden values.
# NOTE(review): `tf.intaa` / `tf.floataa` are obfuscations of `tf.int32` /
# `tf.float32`, and the golden-slice comparison reads `expected_slice` /
# `output`, which the `snake_case_` assignments never bind — unresolved as
# written.
@require_tf
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
    @slow
    def lowerCamelCase (self ) -> Optional[Any]:
        '''simple docstring'''
        snake_case_ : List[Any] = TFFlaubertModel.from_pretrained('''jplu/tf-flaubert-small-cased''' )
        snake_case_ : int = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
        snake_case_ : Tuple = model(__magic_name__ )[0]
        snake_case_ : str = tf.TensorShape((1, 8, 512) )
        self.assertEqual(output.shape , __magic_name__ )
        # compare the actual values for a slice.
        snake_case_ : List[str] = tf.convert_to_tensor(
            [
                [
                    [-1.8_768_773, -1.566_555, 0.27_072_418],
                    [-1.6_920_038, -0.5_873_505, 1.9_329_599],
                    [-2.9_563_985, -1.6_993_835, 1.7_972_052],
                ]
            ] , dtype=tf.floataa , )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 279 |
from __future__ import annotations
def lowerCamelCase_(value, weight, capacity) -> tuple[float, list[float]]:
    """Solve the fractional knapsack problem greedily.

    Items are taken whole in decreasing value/weight ratio; when the next item
    no longer fits, the fraction that still fits is taken and the loop stops.

    Args:
        value: profit of each item.
        weight: weight of each item, parallel to ``value`` (weights > 0).
        capacity: total carrying capacity.

    Returns:
        ``(max_value, fractions)``: the best achievable total value and, per
        item, the fraction in [0, 1] of that item that was taken.

    Fixes vs. the previous revision: the three parameters shared a single name
    (a SyntaxError), every intermediate was bound to the throwaway name
    ``snake_case_`` while later statements read ``index``/``ratio``/
    ``max_value``/``fractions`` (NameErrors), the sort key referenced a stale
    loop variable instead of its own argument, and ``reverse`` was not passed
    a boolean.

    >>> lowerCamelCase_([60, 100, 120], [10, 20, 30], 50)
    (240.0, [1, 1, 0.6666666666666666])
    """
    index = list(range(len(weight)))
    ratio = [v / w for v, w in zip(value, weight)]
    # Greedy: consider items in decreasing value-per-weight order.
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            # The whole item fits.
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Take only the fraction that fits; capacity is now exhausted.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 279 | 1 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
# Module-level logger. NOTE(review): bound to the obfuscated name
# `lowerCAmelCase_`; training scripts of this family conventionally read it as
# `logger`, which would not resolve to this binding.
lowerCAmelCase_ = logging.getLogger(__name__)
# Data/training arguments dataclass for the XNLI-style classification script
# (presumably `DataTrainingArguments`).
# NOTE(review): every field below is declared under the same name
# `lowerCamelCase_`, so the annotations overwrite each other and only the last
# field survives on the dataclass; `_a` used as a default is also undefined
# (presumably `False`/`None`). Originals were presumably `max_seq_length`,
# `overwrite_cache`, `pad_to_max_length`, `max_train_samples`,
# `max_eval_samples`, `max_predict_samples`.
@dataclass
class __lowerCAmelCase :
    lowerCamelCase_ : Optional[int] = field(
        default=128, metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        }, )
    lowerCamelCase_ : bool = field(
        default=_a, metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
    lowerCamelCase_ : bool = field(
        default=_a, metadata={
            '''help''': (
                '''Whether to pad all samples to `max_seq_length`. '''
                '''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
            )
        }, )
    lowerCamelCase_ : Optional[int] = field(
        default=_a, metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        }, )
    lowerCamelCase_ : Optional[int] = field(
        default=_a, metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        }, )
    lowerCamelCase_ : Optional[int] = field(
        default=_a, metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of prediction examples to this '''
                '''value if set.'''
            )
        }, )
# Model arguments dataclass (presumably `ModelArguments`).
# NOTE(review): as above, all fields share the name `lowerCamelCase_` (only the
# last survives) and `_a` defaults are undefined. Originals were presumably
# `model_name_or_path`, `language`, `train_language`, `config_name`,
# `tokenizer_name`, `cache_dir`, `do_lower_case`, `use_fast_tokenizer`,
# `model_revision`, `use_auth_token`, `ignore_mismatched_sizes`.
@dataclass
class __lowerCAmelCase :
    lowerCamelCase_ : str = field(
        default=_a, metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    lowerCamelCase_ : str = field(
        default=_a, metadata={'''help''': '''Evaluation language. Also train language if `train_language` is set to None.'''} )
    lowerCamelCase_ : Optional[str] = field(
        default=_a, metadata={'''help''': '''Train language if it is different from the evaluation language.'''} )
    lowerCamelCase_ : Optional[str] = field(
        default=_a, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    lowerCamelCase_ : Optional[str] = field(
        default=_a, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    lowerCamelCase_ : Optional[str] = field(
        default=_a, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
    lowerCamelCase_ : Optional[bool] = field(
        default=_a, metadata={'''help''': '''arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'''}, )
    lowerCamelCase_ : bool = field(
        default=_a, metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''}, )
    lowerCamelCase_ : str = field(
        default='''main''', metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''}, )
    lowerCamelCase_ : bool = field(
        default=_a, metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        }, )
    lowerCamelCase_ : bool = field(
        default=_a, metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''}, )
def lowerCamelCase_ ( ) -> Optional[Any]:
    """Entry point: fine-tune and/or evaluate a sequence classifier on XNLI.

    Parses ``ModelArguments``, ``DataTrainingArguments`` and
    ``TrainingArguments`` from the command line, loads the XNLI splits
    requested via ``--do_train/--do_eval/--do_predict``, tokenizes them, and
    drives a ``transformers.Trainer`` through training, evaluation and
    prediction, writing metrics and predictions to ``output_dir``.

    All local names were restored from their use sites; the auto-generated
    source assigned everything to a single placeholder name.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli",
                model_args.language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli",
                model_args.train_language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="validation",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="test",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )

    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))
        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
    # The training entry point above carries the auto-generated name
    # `lowerCamelCase_`; `main` is not defined in this module.
    lowerCamelCase_()
| 279 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    """Holds the configuration knobs that the GLPN image-processor tests use.

    The original auto-generated ``__init__`` repeated a single parameter name
    nine times (a SyntaxError); the names below are restored from the
    attribute assignments in the body.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        """Return the kwargs used to construct a ``GLPNImageProcessor``."""
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


# Backward-compatible binding for the original auto-generated class name.
__lowerCAmelCase = GLPNImageProcessingTester
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for ``GLPNImageProcessor`` over PIL, numpy and torch inputs.

    Names are restored from self-references (``self.image_processing_class``,
    ``self.image_processor_tester``, ``self.image_processor_dict``); the
    auto-generated source bound every method to one name so none ran.
    """

    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)


# Backward-compatible binding for the original auto-generated class name.
__lowerCAmelCase = GLPNImageProcessingTest
| 279 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase_ = 1_6  # NOTE(review): immediately rebound on the next line; presumably a batch-size constant (e.g. MAX_GPU_BATCH_SIZE) -- confirm
lowerCAmelCase_ = 3_2  # NOTE(review): overwrites the binding above; presumably EVAL_BATCH_SIZE, intended as a distinct name -- confirm
def get_dataloaders(accelerator, batch_size=16):
    """Build train/eval dataloaders for GLUE MRPC tokenized with bert-base-cased.

    Args:
        accelerator: the ``Accelerator``; used for ``main_process_first`` and
            to choose TPU-friendly padding in the collate function.
        batch_size: per-device training batch size.

    Returns:
        ``(train_dataloader, eval_dataloader)``

    The original auto-generated signature repeated one parameter name (a
    SyntaxError); names are restored from the call site and the body.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=32
    )

    return train_dataloader, eval_dataloader


# Backward-compatible binding for the original auto-generated name.
lowerCamelCase_ = get_dataloaders
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    # Swap the real dataloader builder for the mocked one; the auto-generated
    # source bound the mock to an unused name, so the real loaders still ran.
    get_dataloaders = mocked_dataloaders  # noqa: F811
    lowerCAmelCase_ = mocked_dataloaders  # keep the original (auto-generated) binding too
def training_function(config, args):
    """Train bert-base-cased on GLUE MRPC with gradient accumulation + LocalSGD.

    Args:
        config: dict with ``lr``, ``num_epochs``, ``seed``, ``batch_size``.
        args: parsed CLI namespace (mixed_precision, gradient_accumulation_steps,
            local_sgd_steps, cpu).

    The original auto-generated signature repeated one parameter name (a
    SyntaxError); local names are restored from their use sites.
    """
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs)
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


# Backward-compatible binding for the original auto-generated name.
lowerCamelCase_ = training_function
def main():
    """Parse CLI arguments and launch the LocalSGD training function.

    ``type=`` / ``default=`` values are restored from the original example
    (the auto-generated source replaced them with an undefined placeholder).
    """
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


# Backward-compatible binding for the original auto-generated name.
lowerCamelCase_ = main
if __name__ == "__main__":
    # NOTE(review): requires a module-level `main` under this exact name; the
    # argument-parsing entry point above appears to carry an auto-generated
    # name -- verify the definition matches.
    main()
| 279 |
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


# Import the real UnCLIP pipelines only when torch and a recent-enough
# transformers are available; otherwise fall back to dummy placeholder
# objects that raise a helpful error on use.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Dummy objects mirror the public API so `from ... import UnCLIPPipeline`
    # still succeeds without the optional dependencies installed.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 279 | 1 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowerCAmelCase(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for ``RoCBertTokenizer``.

    The base class is restored to ``TokenizerTesterMixin`` (imported above;
    the auto-generated source referenced an undefined ``_a``), and the class
    attributes are restored to the names the mixin and the methods below read
    (``self.tokenizer_class``, ``self.test_rust_tokenizer``, ...).
    """

    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
snake_case_ : Optional[Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
snake_case_ : Optional[Any] = {}
snake_case_ : int = {}
for i, value in enumerate(__magic_name__ ):
snake_case_ : List[str] = i
snake_case_ : Dict = i
snake_case_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
snake_case_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(__magic_name__ , __magic_name__ , ensure_ascii=__magic_name__ )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(__magic_name__ , __magic_name__ , ensure_ascii=__magic_name__ )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
snake_case_ : str = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
snake_case_ : List[str] = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(__magic_name__ , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(__magic_name__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(__magic_name__ ) , [5, 6, 2, 5, 7, 8] )
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = RoCBertBasicTokenizer(do_lower_case=__magic_name__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = RoCBertBasicTokenizer(do_lower_case=__magic_name__ , strip_accents=__magic_name__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Dict = RoCBertBasicTokenizer(do_lower_case=__magic_name__ , strip_accents=__magic_name__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = RoCBertBasicTokenizer(do_lower_case=__magic_name__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = RoCBertBasicTokenizer(do_lower_case=__magic_name__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[int] = RoCBertBasicTokenizer(do_lower_case=__magic_name__ , strip_accents=__magic_name__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Optional[int] = RoCBertBasicTokenizer(do_lower_case=__magic_name__ , strip_accents=__magic_name__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Dict = RoCBertBasicTokenizer(do_lower_case=__magic_name__ , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
snake_case_ : Union[str, Any] = {}
for i, token in enumerate(__magic_name__ ):
snake_case_ : List[str] = i
snake_case_ : List[str] = RoCBertWordpieceTokenizer(vocab=__magic_name__ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def lowerCamelCase (self ) -> Union[str, Any]:
    """Spot-check `_is_whitespace` on whitespace and non-whitespace characters."""
    # Plain spaces, tabs, CR/LF and the non-breaking space all count.
    for char in (''' ''', '''\t''', '''\r''', '''\n''', '''\u00A0'''):
        self.assertTrue(_is_whitespace(char ) )
    # Letters and punctuation do not.
    for char in ('''A''', '''-'''):
        self.assertFalse(_is_whitespace(char ) )
def lowerCamelCase (self ) -> Any:
    """Spot-check `_is_control`: control chars yes; printables and whitespace no."""
    self.assertTrue(_is_control('''\u0005''' ) )
    # Letters, spaces, tabs and carriage returns are not treated as control chars.
    for char in ('''A''', ''' ''', '''\t''', '''\r'''):
        self.assertFalse(_is_control(char ) )
def lowerCamelCase (self ) -> List[str]:
    """Spot-check `_is_punctuation` on punctuation and non-punctuation characters."""
    for char in ('''-''', '''$''', '''`''', '''.'''):
        self.assertTrue(_is_punctuation(char ) )
    for char in ('''A''', ''' '''):
        self.assertFalse(_is_punctuation(char ) )
def lowerCamelCase (self ) -> int:
    """Soft hyphens (\\xad) must be cleaned away; OOV words map to [UNK]."""
    tokenizer = self.get_tokenizer()

    # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
    self.assertListEqual([tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )

    if self.test_rust_tokenizer:
        rust_tokenizer = self.get_rust_tokenizer()
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def lowerCamelCase (self ) -> int:
    """Offset mapping must point back into the raw sentence, for both casings."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )

            sentence = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
            # NOTE(review): encode_plus flag values reconstructed — only the offsets
            # and tokens are asserted below, so these are the standard settings.
            tokens = tokenizer_r.encode_plus(
                sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )

            do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , '''do_lower_case''' ) else False
            # The cased variant keeps "naïve" split with accents; the uncased one
            # lowercases and strips accents, merging it into a single piece.
            expected_results = (
                [
                    ((0, 0), tokenizer_r.cls_token),
                    ((0, 1), '''A'''),
                    ((1, 2), ''','''),
                    ((3, 5), '''na'''),
                    ((5, 6), '''##ï'''),
                    ((6, 8), '''##ve'''),
                    ((9, 15), tokenizer_r.mask_token),
                    ((16, 21), '''Allen'''),
                    ((21, 23), '''##NL'''),
                    ((23, 24), '''##P'''),
                    ((25, 33), '''sentence'''),
                    ((33, 34), '''.'''),
                    ((0, 0), tokenizer_r.sep_token),
                ]
                if not do_lower_case
                else [
                    ((0, 0), tokenizer_r.cls_token),
                    ((0, 1), '''a'''),
                    ((1, 2), ''','''),
                    ((3, 8), '''naive'''),
                    ((9, 15), tokenizer_r.mask_token),
                    ((16, 21), '''allen'''),
                    ((21, 23), '''##nl'''),
                    ((23, 24), '''##p'''),
                    ((25, 33), '''sentence'''),
                    ((33, 34), '''.'''),
                    ((0, 0), tokenizer_r.sep_token),
                ]
            )

            self.assertEqual(
                [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
            self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def lowerCamelCase (self ) -> Any:
    """`tokenize_chinese_chars` toggles per-character splitting of CJK text."""
    list_of_commun_chinese_char = ['''的''', '''人''', '''有''']
    text_with_chinese_char = ''''''.join(list_of_commun_chinese_char )
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
            # NOTE(review): kwargs toggle and add_special_tokens=False reconstructed
            # from the assertions below (each char must become its own token).
            kwargs['''tokenize_chinese_chars'''] = True
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )

            ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
            ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )

            tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
            tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )

            # it is expected that each Chinese character is not preceded by "##"
            self.assertListEqual(tokens_without_spe_char_p , list_of_commun_chinese_char )
            self.assertListEqual(tokens_without_spe_char_r , list_of_commun_chinese_char )

            kwargs['''tokenize_chinese_chars'''] = False
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )

            ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
            ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )

            tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
            tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )

            # it is expected that only the first Chinese character is not preceded by "##".
            expected_tokens = [
                F'''##{token}''' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char )
            ]
            self.assertListEqual(tokens_without_spe_char_p , expected_tokens )
            self.assertListEqual(tokens_without_spe_char_r , expected_tokens )
@slow
def lowerCamelCase (self ) -> Union[str, Any]:
    """build_inputs_with_special_tokens must produce [CLS] A [SEP] (B [SEP])."""
    tokenizer = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )

    # add_special_tokens=False reconstructed: the asserts below add [CLS]/[SEP]
    # ids by hand, so the raw encodes must not already contain them.
    text = tokenizer.encode('''你好''' , add_special_tokens=False )
    text_a = tokenizer.encode('''你是谁''' , add_special_tokens=False )

    encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
    encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )

    # presumably cls_token_id == 1 and sep_token_id == 2 in the test vocab
    assert encoded_sentence == [1] + text + [2]
    assert encoded_pair == [1] + text + [2] + text_a + [2]
def lowerCamelCase (self ) -> Dict:
    """prepare_for_model on pre-converted ids must match encode_plus on raw text."""
    tokenizers = self.get_tokenizers(do_lower_case=False )
    for tokenizer in tokenizers:
        with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
            string_sequence = '''你好,你是谁'''
            tokens = tokenizer.tokenize(string_sequence )
            tokens_ids = tokenizer.convert_tokens_to_ids(tokens )
            # RoCBert carries parallel shape/pronunciation id streams per token.
            tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens )
            tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens )
            prepared_input_dict = tokenizer.prepare_for_model(
                tokens_ids , tokens_shape_ids , tokens_proun_ids , add_special_tokens=True )

            input_dict = tokenizer.encode_plus(string_sequence , add_special_tokens=True )

            self.assertEqual(input_dict , prepared_input_dict )
| 279 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
lowerCAmelCase_ = float('''nan''')
class __lowerCAmelCase :
    """Duplicates everything written to stdout into a log file (a shell `tee`)."""

    def __init__(self , __magic_name__ ) -> None:
        # Keep the real stream so writes still reach the console, and open the
        # log file for appending. (The obfuscated version never set these
        # attributes, which made __getattr__ recurse.)
        self.stdout = sys.stdout
        self.file = open(__magic_name__ , '''a''' )

    def __getattr__(self , __magic_name__ ) -> Any:
        # Delegate everything else (flush, isatty, ...) to the real stdout.
        return getattr(self.stdout , __magic_name__ )

    def lowerCamelCase (self , __magic_name__ ) -> None:
        """Write the message to the console and, with tqdm codes stripped, to the file."""
        self.stdout.write(__magic_name__ )
        # strip tqdm codes
        self.file.write(re.sub(R'''^.*\r''' , '''''' , __magic_name__ , 0 , re.M ) )

    # NOTE(review): to intercept print() when assigned to sys.stdout this method
    # must be reachable as `write`; alias it without breaking the existing name.
    write = lowerCamelCase
def get_original_command(max_width=80 , full_python_path=False ):
    """
    Reconstruct the command line this script was launched with, wrapped with
    shell line continuations.

    Args:
        max_width: maximum width of a single reconstructed line.
        full_python_path: if True use the full interpreter path, else just its
            basename (e.g. ``python``).

    Returns:
        A multi-line shell-replayable command string.

    Note: renamed from the obfuscated ``lowerCamelCase_`` to match the call
    site (``get_original_command()``); the duplicate ``_UpperCamelCase``
    parameters were a SyntaxError.
    """
    cmd = []
    # deal with critical env vars
    env_keys = ['''CUDA_VISIBLE_DEVICES''']
    for key in env_keys:
        val = os.environ.get(key , None )
        if val is not None:
            cmd.append(f'''{key}={val}''' )

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split('''/''' )[-1]
    cmd.append(python )

    # now the normal args
    cmd += list(map(shlex.quote , sys.argv ) )

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ''''''
    while len(cmd ) > 0:
        current_line += f'''{cmd.pop(0 )} '''
        if len(cmd ) == 0 or len(current_line ) + len(cmd[0] ) + 1 > max_width - 1:
            lines.append(current_line )
            current_line = ''''''
    return "\\\n".join(lines )
def get_base_command(args , output_dir ):
    """
    Normalize ``args.base_cmd`` into an argv list, forcing our own
    ``--output_dir`` and ``--overwrite_output_dir``.

    Mutates ``args.base_cmd`` in place; returns ``[sys.executable, *argv]``.
    (The obfuscated version discarded every ``re.sub`` result and had duplicate
    parameter names; renamed to match the call site ``get_base_command``.)
    """
    # unwrap multi-line input
    args.base_cmd = re.sub(R'''[\\\n]+''' , ''' ''' , args.base_cmd )

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(R'''--output_dir\s+[^\s]+''' , '''''' , args.base_cmd )
    args.base_cmd += f''' --output_dir {output_dir}'''

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(R'''--overwrite_output_dir\s+''' , '''''' , args.base_cmd )
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd )
def process_run_single(id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose ):
    """
    Run one benchmark sub-process, save its stdout/stderr under ``output_dir``,
    and return the requested metrics from ``output_dir/all_results.json``
    (``{target_metric_key: nan}`` when the sub-process fails).

    Renamed from the obfuscated ``lowerCamelCase_`` to match its call site.
    """
    # Flip `if 0` to `if 1` to debug this script quickly without real runs.
    if 0:
        import random
        from time import sleep

        sleep(0 )
        return dict(
            {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6_666, 222.22_222_222] )} , )

    result = subprocess.run(cmd , capture_output=True , text=True )

    if verbose:
        print('''STDOUT''' , result.stdout )
        print('''STDERR''' , result.stderr )

    # save the streams
    prefix = variation.replace(''' ''' , '''-''' )
    with open(Path(output_dir ) / f'''log.{prefix}.stdout.txt''' , '''w''' ) as f:
        f.write(result.stdout )
    with open(Path(output_dir ) / f'''log.{prefix}.stderr.txt''' , '''w''' ) as f:
        f.write(result.stderr )

    if result.returncode != 0:
        if verbose:
            print('''failed''' )
        return {target_metric_key: nan}

    with io.open(f'''{output_dir}/all_results.json''' , '''r''' , encoding='''utf-8''' ) as f:
        metrics = json.load(f )

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(id , cmd , variation_key , variation , longest_variation_len , target_metric_key , report_metric_keys , repeat_times , output_dir , verbose ):
    """
    Run one variation ``repeat_times`` times and return the averaged metrics
    (``{variation_key: variation, target_metric_key: nan}`` if every run failed).

    Renamed from the obfuscated ``lowerCamelCase_`` to match its call site;
    the duplicate parameter names were a SyntaxError.
    """
    results = []
    metrics = []
    preamble = f'''{id}: {variation:<{longest_variation_len}}'''
    outcome = f'''{preamble}: '''
    metric_keys = set(report_metric_keys + [target_metric_key] )
    for i in tqdm(range(repeat_times ) , desc=preamble , leave=False ):
        single_run_metrics = process_run_single(
            id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result ):
            metrics.append(single_run_metrics )
            results.append(result )
            outcome += "✓"
        else:
            outcome += "✘"
    # \33[2K erases tqdm leftovers on the line before the outcome is printed.
    outcome = f'''\33[2K\r{outcome}'''
    if len(metrics ) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key] , 2 )
        results_str = f'''{outcome} {mean_target}'''
        if len(results ) > 1:
            results_str += f''' {tuple(round(x , 2 ) for x in results )}'''
        print(results_str )
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome )
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    """
    Return a software/hardware setup report string for the benchmark summary.

    Requires a CUDA device (``torch.cuda.get_device_properties``). Renamed from
    the obfuscated ``lowerCamelCase_`` to match its call site; the obfuscated
    body assigned the device properties to a dead local while the f-string read
    the undefined name ``properties``.
    """
    properties = torch.cuda.get_device_properties(torch.device('''cuda''' ) )
    return f'''
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def process_results(results , target_metric_key , report_metric_keys , base_variation , output_dir ):
    """
    Build and print github- and console-flavoured markdown reports of all runs,
    with a diff-% column against ``base_variation`` (or the minimal target
    value as fallback).

    Renamed from the obfuscated ``lowerCamelCase_`` to match its call site;
    the obfuscated body discarded every DataFrame transformation result.
    """
    df = pd.DataFrame(results )
    variation_key = '''variation'''
    diff_key = '''diff_%'''

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation] ):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value ):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value ):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value ) / sentinel_value )
            if not math.isnan(r[target_metric_key] )
            else 0 , axis='''columns''' , )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols , axis='''columns''' )  # reorder cols

    # capitalize
    df = df.rename(str.capitalize , axis='''columns''' )

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace('''_''' , '''<br>''' ) , axis='''columns''' )
    df_console = df.rename(lambda c: c.replace('''_''' , '''\n''' ) , axis='''columns''' )

    report = ['''''', '''Copy between the cut-here-lines and paste as is to github or a forum''']
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False , floatfmt='''.2f''' )]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False , floatfmt='''.2f''' )]

    print('''\n\n'''.join(report ) )
def main():
    """
    Parse CLI args, run every cartesian-product variation of the base command,
    and print the aggregated benchmark report.

    Renamed from the obfuscated ``lowerCamelCase_`` so the
    ``if __name__ == "__main__": main()`` guard below resolves.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--base-cmd''' , default=None , type=str , required=True , help='''Base cmd''' , )
    parser.add_argument(
        '''--variations''' , default=None , type=str , nargs='''+''' , required=True , help='''Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'''' , )
    parser.add_argument(
        '''--base-variation''' , default=None , type=str , help='''Baseline variation to compare to. if None the minimal target value will be used to compare against''' , )
    parser.add_argument(
        '''--target-metric-key''' , default=None , type=str , required=True , help='''Target metric key in output_dir/all_results.json, e.g., train_samples_per_second''' , )
    parser.add_argument(
        '''--report-metric-keys''' , default='''''' , type=str , help='''Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples''' , )
    parser.add_argument(
        '''--repeat-times''' , default=1 , type=int , help='''How many times to re-run each variation - an average will be reported''' , )
    parser.add_argument(
        '''--output_dir''' , default='''output_benchmark''' , type=str , help='''The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked''' , )
    parser.add_argument(
        '''--verbose''' , default=False , action='''store_true''' , help='''Whether to show the outputs of each run or just the benchmark progress''' , )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir ).mkdir(exist_ok=True )
    base_cmd = get_base_command(args , output_dir )

    # split each dimension into its --foo variations
    dims = [list(map(str.strip , re.split(R'''\|''' , x ) ) ) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip , map(''' '''.join , itertools.product(*dims ) ) ) )
    longest_variation_len = max(len(x ) for x in variations )

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'''
    print(f'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' )
    print(f'''and this script\'s output is also piped into {report_fn}''' )

    # NOTE(review): `__lowerCAmelCase` is the Tee class defined above
    # (presumably originally named `Tee`); assigning it to sys.stdout is what
    # actually pipes this script's prints into the report file.
    sys.stdout = __lowerCAmelCase(report_fn )

    print(f'''\n*** Running {len(variations )} benchmarks:''' )
    print(f'''Base command: {" ".join(base_cmd )}''' )

    variation_key = '''variation'''
    results = []
    for id, variation in enumerate(tqdm(variations , desc='''Total completion: ''' , leave=False ) ):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1 , cmd , variation_key , variation , longest_variation_len , args.target_metric_key , report_metric_keys , args.repeat_times , output_dir , args.verbose , ) )

    process_results(results , args.target_metric_key , report_metric_keys , args.base_variation , output_dir )
# Script entry point; all the work happens in main().
if __name__ == "__main__":
    main()
| 279 | 1 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class __lowerCAmelCase ( nn.Module ):
    """
    Cross-attention 2D down-block: ``num_layers`` x [resnet -> transformer],
    optionally followed by a downsampler. Returns the final hidden states plus
    the tuple of intermediate states (for the skip connections).
    """

    # NOTE(review): field names reconstructed from their uses in setup/__call__;
    # the obfuscated version declared them all as the same anonymous name.
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32  # obfuscated as `floataa`, which doesn't exist

    def setup(self ) -> None:
        # flax calls setup() to build submodules; the obfuscated name was never invoked.
        resnets = []
        attentions = []
        for i in range(self.num_layers ):
            # first resnet maps in_channels -> out_channels, the rest keep out_channels
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=in_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )

    def __call__(self , hidden_states , temb , encoder_hidden_states , deterministic=True ):
        output_states = ()
        for resnet, attn in zip(self.resnets , self.attentions ):
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states )
            output_states += (hidden_states,)
        return hidden_states, output_states
class __lowerCAmelCase ( nn.Module ):
    """
    Plain 2D down-block: ``num_layers`` resnets, optionally followed by a
    downsampler. Returns the final hidden states plus the tuple of
    intermediate states (for the skip connections).
    """

    # NOTE(review): field names reconstructed from their uses in setup/__call__.
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32  # obfuscated as `floataa`, which doesn't exist

    def setup(self ) -> None:
        resnets = []
        for i in range(self.num_layers ):
            # first resnet maps in_channels -> out_channels, the rest keep out_channels
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=in_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )

    def __call__(self , hidden_states , temb , deterministic=True ):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states )
            output_states += (hidden_states,)
        return hidden_states, output_states
class __lowerCAmelCase ( nn.Module ):
    """
    Cross-attention 2D up-block: ``num_layers`` x [concat skip -> resnet ->
    transformer], optionally followed by an upsampler.
    """

    # NOTE(review): field names reconstructed from their uses in setup/__call__.
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32  # obfuscated as `floataa`, which doesn't exist

    def setup(self ) -> None:
        resnets = []
        attentions = []
        for i in range(self.num_layers ):
            # channels of the skip connection consumed at this layer
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )

    def __call__(self , hidden_states , res_hidden_states_tuple , temb , encoder_hidden_states , deterministic=True ):
        for resnet, attn in zip(self.resnets , self.attentions ):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states )
        return hidden_states
class __lowerCAmelCase ( nn.Module ):
    """
    Plain 2D up-block: ``num_layers`` x [concat skip -> resnet], optionally
    followed by an upsampler.
    """

    # NOTE(review): field names reconstructed from their uses in setup/__call__.
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32  # obfuscated as `floataa`, which doesn't exist

    def setup(self ) -> None:
        resnets = []
        for i in range(self.num_layers ):
            # channels of the skip connection consumed at this layer
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )

    def __call__(self , hidden_states , res_hidden_states_tuple , temb , deterministic=True ):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states )
        return hidden_states
class __lowerCAmelCase ( nn.Module ):
    """
    2D UNet mid-block: an initial resnet followed by ``num_layers`` x
    [transformer -> resnet], all at ``in_channels`` width.
    """

    # NOTE(review): field names reconstructed from their uses in setup/__call__.
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32  # obfuscated as `floataa`, which doesn't exist

    def setup(self ) -> None:
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlockaD(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
        ]
        attentions = []
        for _ in range(self.num_layers ):
            attn_block = FlaxTransformeraDModel(
                in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
            res_block = FlaxResnetBlockaD(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        self.attentions = attentions

    def __call__(self , hidden_states , temb , encoder_hidden_states , deterministic=True ):
        hidden_states = self.resnets[0](hidden_states , temb )
        for attn, resnet in zip(self.attentions , self.resnets[1:] ):
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
        return hidden_states
| 279 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    # Build an UnCLIP image-variation pipeline by reusing the components of a
    # pretrained txt2img UnCLIP pipeline plus a CLIP image encoder.
    parser = argparse.ArgumentParser()

    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')

    parser.add_argument(
        '''--txt2img_unclip''',
        default='''kakaobrain/karlo-v1-alpha''',
        type=str,
        required=False,
        help='''The pretrained txt2img unclip.''',
    )

    args = parser.parse_args()

    # argparse exposes --txt2img_unclip as args.txt2img_unclip (the obfuscated
    # code read a nonexistent args.txtaimg_unclip attribute).
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')

    imgaimg = UnCLIPImageVariationPipeline(
        decoder=txtaimg.decoder,
        text_encoder=txtaimg.text_encoder,
        tokenizer=txtaimg.tokenizer,
        text_proj=txtaimg.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txtaimg.super_res_first,
        super_res_last=txtaimg.super_res_last,
        decoder_scheduler=txtaimg.decoder_scheduler,
        super_res_scheduler=txtaimg.super_res_scheduler,
    )

    imgaimg.save_pretrained(args.dump_path)
| 279 | 1 |
import math
def is_prime(number: int ) -> bool:
    """Return True if `number` is prime (trial division over 6k±1 candidates)."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001 ) -> int:
    """
    Return the `nth` prime number (Project Euler problem 7).

    Raises:
        TypeError: if `nth` is not castable to int.
        ValueError: if `nth` < 1.
    """
    try:
        nth = int(nth )
    except (TypeError, ValueError):
        raise TypeError('''Parameter nth must be int or castable to int.''' ) from None
    if nth <= 0:
        raise ValueError('''Parameter nth must be greater than or equal to one.''' )
    primes: list[int] = []
    num = 2
    while len(primes ) < nth:
        if is_prime(num ):
            primes.append(num )
        num += 1
    return primes[len(primes ) - 1]


if __name__ == "__main__":
    print(F'''{solution() = }''')
| 279 |
from math import factorial

# Factorials of the decimal digits 0-9, keyed by the digit character.
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    """Return the sum of the factorials of the digits of ``number``.

    Raises:
        TypeError: if ``number`` is not an int.
        ValueError: if ``number`` is negative.
    """
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    """Project Euler 74: count starting numbers below ``number_limit`` whose
    digit-factorial chain contains exactly ``chain_length`` non-repeating terms.

    Raises:
        TypeError: if either parameter is not an int.
        ValueError: if either parameter is not positive.
    """
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        # Reuse the already-computed length of the chain starting at chain_element.
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
| 279 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    """Configuration for RoCBert models.

    Stores the standard BERT-style architecture hyperparameters plus the
    RoCBert-specific pronunciation/shape embedding settings.
    """

    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        # RoCBert-specific: auxiliary pronunciation and glyph-shape embeddings.
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 279 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a single-file compressed archive as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__(self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs):
        """
        Args:
            fo: path to the compressed file.
            target_protocol: protocol of the filesystem the compressed file lives on.
            target_options: options passed to that underlying filesystem.
        """
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        # name of the single (virtual) decompressed file: strip the compression extension
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        # the archive exposes exactly one entry: the decompressed payload
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        # there is only one file, so ignore `path` and read it fully
        return self.file.open().read()

    def _open(self, path, mode="rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    """Read contents of a BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a GZIP file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    """Read contents of an LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """Read contents of an XZ file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a Zstandard file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(self, fo, mode="rb", target_protocol=None, target_options=None, block_size=DEFAULT_BLOCK_SIZE, **kwargs):
        super().__init__(
            fo=fo, mode=mode, target_protocol=target_protocol, target_options=target_options, block_size=block_size, **kwargs
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            # Delegates everything to the wrapped reader but owns a writable __enter__.
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
| 279 | 1 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    """Builds tiny DistilBert configs and dummy inputs for the Flax model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, attention_mask) for a tiny random DistilBert."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape FlaxModelTesterMixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Common Flax model-tester suite for the DistilBert architectures."""

    # NOTE: the original listed FlaxDistilBertForQuestionAnswering twice; the
    # duplicate is removed so each architecture is tested exactly once.
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Smoke-test: every architecture loads the pretrained checkpoint and
        # runs a forward pass on a dummy input.
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        """Compare base-model hidden states against precomputed reference values."""
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 279 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Any = '''megatron-bert'''
def __init__(self , __magic_name__=2_9056 , __magic_name__=1024 , __magic_name__=24 , __magic_name__=16 , __magic_name__=4096 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=512 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=1e-12 , __magic_name__=0 , __magic_name__="absolute" , __magic_name__=True , **__magic_name__ , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=__magic_name__ , **__magic_name__ )
snake_case_ : Union[str, Any] = vocab_size
snake_case_ : Dict = hidden_size
snake_case_ : Dict = num_hidden_layers
snake_case_ : Optional[int] = num_attention_heads
snake_case_ : int = hidden_act
snake_case_ : List[str] = intermediate_size
snake_case_ : Dict = hidden_dropout_prob
snake_case_ : str = attention_probs_dropout_prob
snake_case_ : str = max_position_embeddings
snake_case_ : Any = type_vocab_size
snake_case_ : int = initializer_range
snake_case_ : int = layer_norm_eps
snake_case_ : List[str] = position_embedding_type
snake_case_ : Dict = use_cache
| 279 | 1 |
import functools
from typing import Any


def word_break(string: str, words: list[str]) -> bool:
    """Return True if ``string`` can be segmented into a sequence of words from ``words``.

    Builds a trie over ``words`` and runs a memoized DP over string indices.

    Raises:
        ValueError: if ``string`` is empty/not a string, or ``words`` is not a
            list of non-empty strings.
    """
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}

            trie_node = trie_node[c]

        # marks that a complete word ends at this node
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        """True iff string[index:] can be segmented into words."""
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 279 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
# Shared RNG used when the caller does not supply one.
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of random floats in [0, scale) with the given 2-D shape.

    Args:
        shape: (num_batches, batch_length) pair.
        scale: upper bound of the generated values.
        rng: optional ``random.Random`` instance (defaults to ``global_rng``).
        name: unused; kept for signature compatibility with other test helpers.
    """
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    """Stores feature-extractor hyperparameters and builds raw speech inputs for the tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # step between consecutive sample lengths so the batch spans [min, max]
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        """Return the kwargs used to construct a WhisperFeatureExtractor."""
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Create a batch of raw speech inputs (lists of floats, or numpy arrays if ``numpify``)."""

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    """Tests for WhisperFeatureExtractor: serialization round-trips, batching,
    padding dtype handling, normalization, and a reference integration check."""

    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        # save_pretrained -> from_pretrained must round-trip the config and filters.
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        # to_json_file -> from_json_file must round-trip the config and filters.
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(speech_inputs_truncated, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        # Padding must down-cast float64 inputs to float32 tensors.
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_var_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
| 279 | 1 |
def _print_dist(dist, v):
    """Pretty-print the distance matrix; unreachable pairs print as INF."""
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """All-pairs shortest paths via the Floyd-Warshall algorithm.

    :param graph: 2D adjacency matrix of edge weights (float("inf") when no edge)
    :param v: number of vertices
    :return: (dist, v) where dist[i][j] is the shortest distance from i to j
    """
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)

    # Example Input
    # Enter number of vertices: 3
    # Enter number of edges: 2
    # # generated graph from vertex and edge inputs
    # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
    # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
    # specify source, destination and weight for edge #1
    # Edge 1
    # Enter source:1
    # Enter destination:2
    # Enter weight:2
    # specify source, destination and weight for edge #2
    # Edge 2
    # Enter source:2
    # Enter destination:1
    # Enter weight:1
    # # Expected Output from the vertice, edge and src, dst, weight inputs!!
    # 0 INF INF
    # INF 0 2
    # INF 1 0
| 279 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)


def parse_args():
    """Parse command-line options for preparing TFRecord shards."""
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.")
    parser.add_argument(
        "--dataset_name", type=str, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.", )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset.")
    parser.add_argument(
        "--tokenizer_name_or_path", type=str, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.", )
    parser.add_argument(
        "--shard_size", type=int, default=1_000, help="Number of entries to go in a single shard.", )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit", default=None, type=int, help="Limit the number of shards (used for debugging).", )
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.", )
    parser.add_argument(
        "--output_dir", default="tf-tpu", type=str, help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.", )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    """Return a closure that tokenizes the "text" column of a batch of examples."""

    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    """Serialize tokenized samples into `tf.train.Example` byte strings.

    Args:
        tokenized_data: mapping with parallel "input_ids" and "attention_mask" lists.

    Returns:
        A list of serialized `tf.train.Example` protos, one per sample.
    """
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        example_features = tf.train.Features(feature=features)
        example = tf.train.Example(features=example_features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def lowerCamelCase_ ( args ):
    """Tokenize a dataset split, regroup it into fixed-length chunks and write
    the result as TFRecord shards (locally or to a ``gs://`` bucket).

    Args:
        args: parsed CLI namespace (``dataset_name``, ``dataset_config``,
            ``split``, ``limit``, ``tokenizer_name_or_path``, ``max_length``,
            ``shard_size``, ``output_dir``).
    """
    dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        max_samples = min(len(dataset ) , args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(f'''Limiting the dataset to {args.limit} entries.''' )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    # Only create directories for genuinely local paths; the previous
    # substring test (`"gs" not in ...`) misclassified paths like "/logs/out".
    if not args.output_dir.startswith('''gs://''' ):
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir , args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    # NOTE(review): `tokenize_function` / `get_serialized_examples` below refer
    # to the helpers defined above in this file (their defs were renamed) —
    # confirm the intended identifiers.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=['''text'''] )
    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result
    grouped_dataset = dataset_tokenized.map(group_texts , batched=True , batch_size=1_000 , num_proc=4 )
    shard_count = 0
    total_records = 0
    for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot['''input_ids'''] )
        filename = os.path.join(split_dir , f'''dataset-{shard_count}-{records_containing}.tfrecord''' )
        serialized_examples = get_serialized_examples(dataset_snapshot )
        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
            print('''Wrote file {} containing {} records'''.format(filename , records_containing ) )
        shard_count += 1
        total_records += records_containing
    with open(f'''split-{args.split}-records-count.txt''' , '''w''' ) as f:
        print(f'''Total {args.split} records: {total_records}''' , file=f )
if __name__ == "__main__":
    # NOTE(review): `parse_args`, `main` and `args` are not defined under these
    # names in this file (the definitions above were renamed to
    # `lowerCamelCase_` and the parsed namespace is bound to `lowerCAmelCase_`);
    # as written this guard raises NameError — confirm the intended entry point.
    lowerCAmelCase_ = parse_args()
    main(args)
| 279 | 1 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
lowerCAmelCase_ = datasets.logging.get_logger(__name__)
lowerCAmelCase_ = '''\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
'''
lowerCAmelCase_ = '''\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project\'s README at https://github.com/google-research/bleurt#readme for more information.
'''
lowerCAmelCase_ = '''
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
\'scores\': List of scores.
Examples:
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> bleurt = datasets.load_metric("bleurt")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results["scores"]])
[1.03, 1.04]
'''
lowerCAmelCase_ = {
'''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
'''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
'''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
'''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
'''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
'''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
'''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
'''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
'''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
'''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
    """BLEURT metric wrapper: resolves/downloads a BLEURT checkpoint and scores
    candidate sentences against references.

    NOTE(review): `datasets.Metric` dispatches to `_info`, `_download_and_prepare`
    and `_compute`; the three identically-named methods below shadow one another
    at class-creation time — confirm the intended method names.
    """

    def lowerCamelCase (self ):
        """Declare the metric's metadata and expected input features."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )

    def lowerCamelCase (self , dl_manager ):
        """Resolve the checkpoint name from ``self.config_name``, download it and
        build the BLEURT scorer on ``self.scorer``.

        Args:
            dl_manager: datasets download manager used to fetch the checkpoint.

        Raises:
            KeyError: if ``self.config_name`` matches no known checkpoint.
        """
        if self.config_name == "default":
            logger.warning(
                '''Using default BLEURT-Base checkpoint for sequence maximum length 128. '''
                '''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''' )
            checkpoint_name = '''bleurt-base-128'''
        # `elif` (not a second `if`): the default branch above already picked a
        # checkpoint; re-testing "default" against CHECKPOINT_URLS would raise.
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                F'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' )
        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        self.scorer = score.BleurtScorer(os.path.join(model_path , checkpoint_name ) )

    def lowerCamelCase (self , predictions , references ):
        """Score each prediction against its reference.

        Returns:
            dict: ``{"scores": [...]}`` — one BLEURT score per pair.
        """
        scores = self.scorer.score(references=references , candidates=predictions )
        return {"scores": scores}
| 279 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __lowerCAmelCase ( unittest.TestCase ):
    """Pipeline tests for video classification with a tiny VideoMAE checkpoint.

    NOTE(review): the four methods below share one (renamed) name and shadow
    each other at class-creation time — confirm the intended test names.
    """

    # Architectures covered by this pipeline test.
    lowerCamelCase_ : Any = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def lowerCamelCase (self , model , tokenizer , processor ):
        """Build a pipeline instance plus example inputs for the common harness."""
        example_video_filepath = hf_hub_download(
            repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
        video_classifier = VideoClassificationPipeline(model=model , image_processor=processor , top_k=2 )
        examples = [
            example_video_filepath,
            '''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
        ]
        return video_classifier, examples

    def lowerCamelCase (self , video_classifier , examples ):
        """Run the pipeline on every example and check the top-2 output shape."""
        for example in examples:
            outputs = video_classifier(example )
            self.assertEqual(
                outputs , [
                    {'''score''': ANY(float ), '''label''': ANY(str )},
                    {'''score''': ANY(float ), '''label''': ANY(str )},
                ] , )

    @require_torch
    def lowerCamelCase (self ):
        """Smoke-test a tiny PT VideoMAE model on single and batched inputs."""
        small_model = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} )
        video_classifier = pipeline(
            '''video-classification''' , model=small_model , feature_extractor=small_feature_extractor , frame_sampling_rate=4 )
        video_file_path = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
        outputs = video_classifier(video_file_path , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}] , )
        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ] , top_k=2 , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}],
                [{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}],
            ] , )

    @require_tf
    def lowerCamelCase (self ):
        """No TF checkpoints available for this pipeline yet."""
        pass
| 279 | 1 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
    """Pipeline tests for zero-shot audio classification (CLAP models).

    NOTE(review): the methods below share one (renamed) name and shadow each
    other at class-creation time; unittest will only discover the last one.
    """

    @require_torch
    def lowerCamelCase (self ):
        """Tiny-model smoke test on one ESC-50 clip."""
        audio_classifier = pipeline(
            task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''' )
        dataset = load_dataset('''ashraq/esc50''' )
        audio = dataset['''train''']['''audio'''][-1]['''array''']
        output = audio_classifier(audio , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
        self.assertEqual(
            nested_simplify(output ) , [{'''score''': 0.501, '''label''': '''Sound of a dog'''}, {'''score''': 0.499, '''label''': '''Sound of vaccum cleaner'''}] , )

    @unittest.skip('''No models are available in TF''' )
    def lowerCamelCase (self ):
        """TF variant not implemented upstream."""
        pass

    @slow
    @require_torch
    def lowerCamelCase (self ):
        """Full-size CLAP model: single, repeated and batched inputs."""
        audio_classifier = pipeline(
            task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
        # This is an audio of a dog
        dataset = load_dataset('''ashraq/esc50''' )
        audio = dataset['''train''']['''audio'''][-1]['''array''']
        output = audio_classifier(audio , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
        self.assertEqual(
            nested_simplify(output ) , [
                {'''score''': 0.999, '''label''': '''Sound of a dog'''},
                {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
            ] , )
        output = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {'''score''': 0.999, '''label''': '''Sound of a dog'''},
                    {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
                ],
            ]
            * 5 , )
        output = audio_classifier(
            [audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {'''score''': 0.999, '''label''': '''Sound of a dog'''},
                    {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
                ],
            ]
            * 5 , )

    @unittest.skip('''No models are available in TF''' )
    def lowerCamelCase (self ):
        """TF variant not implemented upstream."""
        pass
| 279 |
def lowerCamelCase_ ( number , number_of_terms ) -> str:
    """Return the multiplication table of ``number`` as one newline-joined string.

    Args:
        number: the number whose table is produced.
        number_of_terms: how many rows (1..number_of_terms) to include.

    Returns:
        Lines of the form ``"<number> * <i> = <number*i>"`` joined by newlines
        (empty string when ``number_of_terms`` is 0).
    """
    # The original signature repeated one parameter name — a SyntaxError in
    # Python; the body already expects `number` and `number_of_terms`.
    return "\n".join(
        f'''{number} * {i} = {number * i}''' for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
    # Call the table builder by its actual (renamed) identifier; the original
    # call target `multiplication_table` does not exist in this file.
    print(lowerCamelCase_(5, 1_0))
| 279 | 1 |
import os
import sys
import unittest
lowerCAmelCase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowerCAmelCase_ = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
lowerCAmelCase_ = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class __lowerCAmelCase ( unittest.TestCase ):
    """Checks the test<->tester mapping utilities from utils/get_test_info.py.

    NOTE(review): the three methods below share one (renamed) name and shadow
    each other; the two module-level test-file path constants were also
    collapsed into a single name, so each method rebuilds the paths locally.
    """

    def lowerCamelCase (self ):
        """test class -> tester class mapping for the bert and blip test files."""
        bert_test_file = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
        blip_test_file = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
        bert_test_tester_mapping = get_test_to_tester_mapping(bert_test_file )
        blip_test_tester_mapping = get_test_to_tester_mapping(blip_test_file )
        expected_bert_mapping = {'''BertModelTest''': '''BertModelTester'''}
        expected_blip_mapping = {
            '''BlipModelTest''': '''BlipModelTester''',
            '''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
            '''BlipTextModelTest''': '''BlipTextModelTester''',
            '''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
            '''BlipVQAModelTest''': '''BlipVQAModelTester''',
            '''BlipVisionModelTest''': '''BlipVisionModelTester''',
        }
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping ) , expected_bert_mapping )
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping ) , expected_blip_mapping )

    def lowerCamelCase (self ):
        """model class -> test classes mapping for the bert and blip test files."""
        bert_test_file = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
        blip_test_file = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
        bert_model_test_mapping = get_model_to_test_mapping(bert_test_file )
        blip_model_test_mapping = get_model_to_test_mapping(blip_test_file )
        expected_bert_mapping = {
            '''BertForMaskedLM''': ['''BertModelTest'''],
            '''BertForMultipleChoice''': ['''BertModelTest'''],
            '''BertForNextSentencePrediction''': ['''BertModelTest'''],
            '''BertForPreTraining''': ['''BertModelTest'''],
            '''BertForQuestionAnswering''': ['''BertModelTest'''],
            '''BertForSequenceClassification''': ['''BertModelTest'''],
            '''BertForTokenClassification''': ['''BertModelTest'''],
            '''BertLMHeadModel''': ['''BertModelTest'''],
            '''BertModel''': ['''BertModelTest'''],
        }
        expected_blip_mapping = {
            '''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
            '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
            '''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
            '''BlipModel''': ['''BlipModelTest'''],
            '''BlipTextModel''': ['''BlipTextModelTest'''],
            '''BlipVisionModel''': ['''BlipVisionModelTest'''],
        }
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping ) , expected_bert_mapping )
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping ) , expected_blip_mapping )

    def lowerCamelCase (self ):
        """model class -> tester classes mapping for the bert and blip test files."""
        bert_test_file = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
        blip_test_file = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
        bert_model_tester_mapping = get_model_to_tester_mapping(bert_test_file )
        blip_model_tester_mapping = get_model_to_tester_mapping(blip_test_file )
        expected_bert_mapping = {
            '''BertForMaskedLM''': ['''BertModelTester'''],
            '''BertForMultipleChoice''': ['''BertModelTester'''],
            '''BertForNextSentencePrediction''': ['''BertModelTester'''],
            '''BertForPreTraining''': ['''BertModelTester'''],
            '''BertForQuestionAnswering''': ['''BertModelTester'''],
            '''BertForSequenceClassification''': ['''BertModelTester'''],
            '''BertForTokenClassification''': ['''BertModelTester'''],
            '''BertLMHeadModel''': ['''BertModelTester'''],
            '''BertModel''': ['''BertModelTester'''],
        }
        expected_blip_mapping = {
            '''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
            '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
            '''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
            '''BlipModel''': ['''BlipModelTester'''],
            '''BlipTextModel''': ['''BlipTextModelTester'''],
            '''BlipVisionModel''': ['''BlipVisionModelTester'''],
        }
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping ) , expected_bert_mapping )
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping ) , expected_blip_mapping )
| 279 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
lowerCAmelCase_ = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def lowerCamelCase_ ( state_dict ):
    """Drop OpenAI bookkeeping entries from a Whisper state dict, in place.

    Args:
        state_dict: checkpoint dict; the top-level ``layers``/``blocks`` keys
            are removed if present.
    """
    ignore_keys = ['''layers''', '''blocks''']
    for k in ignore_keys:
        # pop with a default so a missing key is not an error
        state_dict.pop(k , None )
lowerCAmelCase_ = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def lowerCamelCase_ ( s_dict ):
    """Rename OpenAI Whisper state-dict keys to the Transformers layout, in place.

    Iterates over a snapshot of the keys and applies every substring rule in
    ``WHISPER_MAPPING``, re-storing each value under its rewritten key.

    Returns:
        The same dict, with renamed keys.
    """
    # NOTE(review): WHISPER_MAPPING is the substitution table defined above in
    # this file (its assignment was renamed) — confirm the module-level name.
    keys = list(s_dict.keys() )
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k , v )
        print(f'''{key} -> {new_key}''' )
        # Move the value to its new key (pop removes the old entry).
        s_dict[new_key] = s_dict.pop(key )
    return s_dict
def lowerCamelCase_ ( emb ):
    """Build a bias-free Linear layer sharing an Embedding's weight tensor.

    Used to create the output projection tied to the decoder token embedding.

    Args:
        emb: an ``nn.Embedding`` (weight shape ``(vocab_size, emb_size)``).

    Returns:
        ``nn.Linear`` whose ``weight.data`` *is* ``emb.weight.data`` (shared,
        not copied).
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    # Tie the projection weights to the embedding (shared storage).
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def lowerCamelCase_ ( url , root ) -> bytes:
    """Download a checkpoint to ``root``, verifying its SHA256 checksum.

    The expected digest is the second-to-last URL path segment (OpenAI embeds
    it in the download URLs). A cached file with a matching digest is reused.

    Args:
        url: checkpoint URL (see the ``_MODELS`` table above).
        root: local cache directory (created if needed).

    Returns:
        The raw checkpoint bytes.

    Raises:
        RuntimeError: if the target path exists but is not a regular file, or
            the downloaded bytes fail the checksum.
    """
    # The original signature repeated one parameter name (a SyntaxError); the
    # body expects a URL plus a cache directory.
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(url )
    expected_sha256 = url.split('''/''' )[-2]
    download_target = os.path.join(root , filename )
    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(f'''{download_target} exists and is not a regular file''' )
    if os.path.isfile(download_target ):
        model_bytes = open(download_target , '''rb''' ).read()
        # `hashlib.sha256` is the real API (the original `shaaaa` does not exist).
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''' )
    with urllib.request.urlopen(url ) as source, open(download_target , '''wb''' ) as output:
        with tqdm(
            total=int(source.info().get('''Content-Length''' ) ) , ncols=80 , unit='''iB''' , unit_scale=True , unit_divisor=1_024 ) as loop:
            while True:
                buffer = source.read(8_192 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )
    model_bytes = open(download_target , '''rb''' ).read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
        # Message fixed: the original said "does not not match".
        raise RuntimeError(
            '''Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.''' )
    return model_bytes
def lowerCamelCase_ ( checkpoint_path , pytorch_dump_folder_path ):
    """Convert an OpenAI Whisper checkpoint into a Transformers Whisper model.

    Args:
        checkpoint_path: local ``.pt`` path, or a key of the ``_MODELS`` URL
            table (in which case the checkpoint is downloaded first).
        pytorch_dump_folder_path: directory passed to ``save_pretrained``.

    Raises:
        ValueError: if weights other than the positional embeddings are
            missing after loading the state dict.
    """
    if ".pt" not in checkpoint_path:
        # NOTE(review): `_download` / `_MODELS` refer to the helper and URL
        # table defined above in this file (both were renamed) — confirm the
        # intended identifiers and the `_download` call signature.
        original_checkpoint = _download(_MODELS[checkpoint_path] )
    else:
        original_checkpoint = torch.load(checkpoint_path , map_location='''cpu''' )
    dimensions = original_checkpoint['''dims''']
    state_dict = original_checkpoint['''model_state_dict''']
    proj_out_weights = state_dict['''decoder.token_embedding.weight''']
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    tie_embeds = True
    ffn_dim = state_dict['''decoder.layers.0.fc1.weight'''].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions['''n_vocab'''] ,
        encoder_ffn_dim=ffn_dim ,
        decoder_ffn_dim=ffn_dim ,
        num_mel_bins=dimensions['''n_mels'''] ,
        d_model=dimensions['''n_audio_state'''] ,
        max_target_positions=dimensions['''n_text_ctx'''] ,
        encoder_layers=dimensions['''n_audio_layer'''] ,
        encoder_attention_heads=dimensions['''n_audio_head'''] ,
        decoder_layers=dimensions['''n_text_layer'''] ,
        # "n_text_head" is the decoder head count; "n_text_state" (used before)
        # is the hidden size and would mis-size the attention heads.
        decoder_attention_heads=dimensions['''n_text_head'''] ,
        max_source_positions=dimensions['''n_audio_ctx'''] , )
    model = WhisperForConditionalGeneration(config )
    missing, unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
            f''' but all the following weights are missing {missing}''' )
    if tie_embeds:
        # Tie the output projection to the decoder token embedding.
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # Build the CLI, parse it, and run the converter. The names are bound
    # sequentially to the file's conventional identifier; `lowerCamelCase_`
    # resolves to the last function defined above (the converter).
    lowerCAmelCase_ = argparse.ArgumentParser()
    # # Required parameters
    lowerCAmelCase_.add_argument('''--checkpoint_path''', type=str, help='''Path to the downloaded checkpoints''')
    lowerCAmelCase_.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    lowerCAmelCase_ = lowerCAmelCase_.parse_args()
    lowerCamelCase_(lowerCAmelCase_.checkpoint_path, lowerCAmelCase_.pytorch_dump_folder_path)
| 279 | 1 |
def lowerCamelCase_ ( numa , numb ) -> bool:
    """Return True iff the two integers have opposite signs.

    Two integers have different signs exactly when their XOR is negative (the
    sign bit of the XOR is set). Comparison binds looser than ``^`` in Python,
    so the expression parses as ``(numa ^ numb) < 0``.

    >>> lowerCamelCase_(1, -1)
    True
    >>> lowerCamelCase_(1, 1)
    False
    """
    # The original signature repeated one parameter name (a SyntaxError), and
    # XOR-ing a value with itself is always 0 — two distinct operands needed.
    return numa ^ numb < 0
if __name__ == "__main__":
    # Run any doctests defined in this module when executed as a script.
    import doctest
    doctest.testmod()
| 279 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
lowerCAmelCase_ = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
lowerCAmelCase_ = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
lowerCAmelCase_ = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
lowerCAmelCase_ = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
lowerCAmelCase_ = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]),
('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
lowerCAmelCase_ = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
lowerCAmelCase_ = (
('''JH AH TH KH QH''', 2_3),
('''JH 9H TH KH QH''', 2_2),
('''JC KH JS JD JH''', 2_1),
('''KH KC 3S 3H 3D''', 2_0),
('''8C 9C 5C 3C TC''', 1_9),
('''JS QS 9H TS KH''', 1_8),
('''7C 7S KH 2H 7H''', 1_7),
('''3C KH 5D 5S KH''', 1_6),
('''QH 8H KD JH 8S''', 1_5),
('''2D 6D 9D TH 7D''', 1_4),
)
def lowerCamelCase_ ( ):
    """Pick two random pre-sorted hands and the expected comparison outcome.

    Returns:
        (hand, other, expected) where ``expected`` is "Loss"/"Tie"/"Win" based
        on the hands' positions in the strength-sorted fixture list. (The
        original ``-> Dict`` annotation was wrong for a 3-tuple.)
    """
    # NOTE(review): SORTED_HANDS is the fixture tuple defined at the top of
    # this file (its assignment was renamed) — confirm the module-level name.
    play, oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
    # play >= oppo contributes 1 for ties and wins; play > oppo adds 1 for wins.
    expected = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def lowerCamelCase_ ( _UpperCamelCase = 100 ):
    """Return a generator of ``_UpperCamelCase`` random (hand, other, expected)
    triples. (The original ``-> str`` annotation was wrong: this yields a
    generator, not a string.)
    """
    # NOTE(review): `generate_random_hand` is defined above in this file under
    # a renamed identifier — confirm the intended helper name.
    return (generate_random_hand() for _ in range(_UpperCamelCase ))
# NOTE(review): TEST_FLUSH is the (renamed) fixture table defined above.
@pytest.mark.parametrize('''hand, expected''' , TEST_FLUSH )
def lowerCamelCase_ ( hand , expected ) -> None:
    """Flush detection matches the fixture's expectation."""
    assert PokerHand(hand )._is_flush() == expected
# NOTE(review): TEST_STRAIGHT is the (renamed) fixture table defined above.
@pytest.mark.parametrize('''hand, expected''' , TEST_STRAIGHT )
def lowerCamelCase_ ( hand , expected ) -> None:
    """Straight detection matches the fixture's expectation."""
    assert PokerHand(hand )._is_straight() == expected
# NOTE(review): TEST_FIVE_HIGH_STRAIGHT is the (renamed) fixture table above.
@pytest.mark.parametrize('''hand, expected, card_values''' , TEST_FIVE_HIGH_STRAIGHT )
def lowerCamelCase_ ( hand , expected , card_values ) -> None:
    """Five-high straight (wheel) detection and the resulting card values."""
    player = PokerHand(hand )
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
# NOTE(review): TEST_KIND is the (renamed) fixture table defined above.
@pytest.mark.parametrize('''hand, expected''' , TEST_KIND )
def lowerCamelCase_ ( hand , expected ) -> None:
    """Same-kind (pairs / trips / quads) detection matches the fixture."""
    assert PokerHand(hand )._is_same_kind() == expected
# NOTE(review): TEST_TYPES is the (renamed) fixture table defined above.
@pytest.mark.parametrize('''hand, expected''' , TEST_TYPES )
def lowerCamelCase_ ( hand , expected ) -> None:
    """Overall hand-type classification matches the fixture."""
    assert PokerHand(hand )._hand_type == expected
# NOTE(review): TEST_COMPARE is the (renamed) fixture table defined above.
@pytest.mark.parametrize('''hand, other, expected''' , TEST_COMPARE )
def lowerCamelCase_ ( hand , other , expected ) -> None:
    """Pairwise comparison of the hand-compare fixture cases."""
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
# NOTE(review): `generate_random_hands` is defined above in this file under a
# renamed identifier — confirm the intended helper name.
@pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() )
def lowerCamelCase_ ( hand , other , expected ) -> None:
    """Pairwise comparison over randomly generated pre-sorted hand pairs."""
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
def lowerCamelCase_ ( ) -> None:
    """Shuffling then sorting the fixture hands restores the original order."""
    # NOTE(review): SORTED_HANDS is the (renamed) fixture tuple defined above.
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
        assert hand == poker_hands[index]
def lowerCamelCase_ ( ) -> Dict:
"""simple docstring"""
snake_case_ : Union[str, Any] = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )]
pokerhands.sort(reverse=_UpperCamelCase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def lowerCamelCase_ ( ) -> str:
"""simple docstring"""
snake_case_ : Dict = PokerHand('''2C 4S AS 3D 5C''' )
snake_case_ : str = True
snake_case_ : Tuple = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def lowerCamelCase_ ( ) -> List[str]:
"""simple docstring"""
snake_case_ : List[str] = 0
snake_case_ : Union[str, Any] = os.path.abspath(os.path.dirname(_UpperCamelCase ) )
snake_case_ : Dict = os.path.join(_UpperCamelCase , '''poker_hands.txt''' )
with open(_UpperCamelCase ) as file_hand:
for line in file_hand:
snake_case_ : Dict = line[:14].strip()
snake_case_ : List[str] = line[15:].strip()
snake_case_ , snake_case_ : str = PokerHand(_UpperCamelCase ), PokerHand(_UpperCamelCase )
snake_case_ : int = player.compare_with(_UpperCamelCase )
if output == "Win":
answer += 1
assert answer == 376
| 279 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def lowerCamelCase_(key, offset, original_name, new_name):
    """Renumber one PoolFormer transformer-block key for the HF layout.

    The original checkpoint numbers blocks globally across stages; the HF model
    restarts the numbering after every patch embedding, so ``offset`` (number of
    patch embeddings already seen) is subtracted from the block index.

    Args:
        key: Full state-dict key, e.g. ``"poolformer.encoder.1.2.mlp.fc1.weight"``.
        offset: Number of patch-embedding layers encountered before this key.
        original_name: Dotted layer name to locate (e.g. ``"mlp.fc1"``).
        new_name: Replacement layer name (e.g. ``"output.conv1"``).

    Returns:
        The key with ``<block>.<layer>.<original_name>`` rewritten to
        ``block.<block - offset>.<layer>.<new_name>``.
    """
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    # The block and layer indices sit immediately before the layer name.
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    return key.replace(
        f"{orig_block_num}.{layer_num}.{original_name}",
        f"block.{new_block_num}.{layer_num}.{new_name}",
    )
def lowerCamelCase_(state_dict):
    """Translate PoolFormer checkpoint keys to the HF ``PoolFormerForImageClassification`` layout.

    Rewrites ``network.*`` prefixes, groups projection layers under numbered
    patch embeddings, renames mlp/norm/layer-scale layers, renumbers blocks so
    numbering restarts after each patch embedding, and maps ``head`` to
    ``classifier``.

    Args:
        state_dict: Mapping of original checkpoint keys to tensors.

    Returns:
        ``OrderedDict`` with the same values under HF-style keys, in the
        original iteration order.
    """

    def _replace_key_with_offset(key, offset, original_name, new_name):
        # Renumber <block>.<layer>.<original_name> to block.<block-offset>.<layer>.<new_name>.
        to_find = original_name.split(".")[0]
        key_list = key.split(".")
        orig_block_num = int(key_list[key_list.index(to_find) - 2])
        layer_num = int(key_list[key_list.index(to_find) - 1])
        new_block_num = orig_block_num - offset
        return key.replace(
            f"{orig_block_num}.{layer_num}.{original_name}",
            f"block.{new_block_num}.{layer_num}.{new_name}",
        )

    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers.
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = _replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = _replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = _replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = _replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            # Name is unchanged; only the block number is shifted.
            key = _replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = _replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def lowerCamelCase_():
    """Download the standard COCO "two cats" image used as a conversion sanity check.

    Returns:
        A ``PIL.Image.Image`` decoded straight from the streamed HTTP response.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # Stream the response so PIL can decode directly from the raw socket file object.
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
    """Convert an original PoolFormer checkpoint to the HF format and save it.

    Intended positional arguments: the model name (e.g. ``"poolformer_s12"``),
    the path to the original ``.pth`` checkpoint, and the output folder. Runs
    under ``torch.no_grad()`` because it only performs a forward sanity check.

    NOTE(review): this function cannot run as written — the signature repeats
    ``_UpperCamelCase`` (a SyntaxError) and every local is bound to
    ``snake_case_`` while later statements read other names (``model_name``,
    ``size``, ``idalabel``, ``logits`` ...). The comments below document the
    intended flow; the original bindings must be restored.
    """
    snake_case_ : int = PoolFormerConfig()  # intended: config
    # set attributes based on model_name
    snake_case_ : Union[str, Any] = '''huggingface/label-files'''  # intended: repo id for the label map
    snake_case_ : Any = model_name[-3:]  # intended: size suffix, e.g. "s12"
    snake_case_ : Any = 1_000  # intended: num_labels (ImageNet-1k)
    snake_case_ : Optional[Any] = '''imagenet-1k-id2label.json'''
    snake_case_ : Union[str, Any] = (1, 1_000)  # intended: expected logits shape
    # set config attributes
    snake_case_ : Optional[Any] = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) )
    snake_case_ : Any = {int(_UpperCamelCase ): v for k, v in idalabel.items()}  # JSON keys arrive as strings
    snake_case_ : int = idalabel  # intended: config.id2label
    snake_case_ : Optional[int] = {v: k for k, v in idalabel.items()}  # intended: config.label2id
    # Per-size hyper-parameters: depths, hidden sizes, mlp ratio,
    # optional layer-scale init value, and the image-processor crop percentage.
    if size == "s12":
        snake_case_ : Optional[Any] = [2, 2, 6, 2]
        snake_case_ : Optional[int] = [64, 128, 320, 512]
        snake_case_ : Optional[int] = 4.0
        snake_case_ : Any = 0.9
    elif size == "s24":
        snake_case_ : List[Any] = [4, 4, 12, 4]
        snake_case_ : str = [64, 128, 320, 512]
        snake_case_ : Union[str, Any] = 4.0
        snake_case_ : Optional[Any] = 0.9
    elif size == "s36":
        snake_case_ : Optional[int] = [6, 6, 18, 6]
        snake_case_ : List[str] = [64, 128, 320, 512]
        snake_case_ : Dict = 4.0
        snake_case_ : Union[str, Any] = 1E-6
        snake_case_ : Union[str, Any] = 0.9
    elif size == "m36":
        snake_case_ : str = [6, 6, 18, 6]
        snake_case_ : Dict = [96, 192, 384, 768]
        snake_case_ : List[Any] = 4.0
        snake_case_ : List[Any] = 1E-6
        snake_case_ : str = 0.95
    elif size == "m48":
        snake_case_ : List[Any] = [8, 8, 24, 8]
        snake_case_ : Union[str, Any] = [96, 192, 384, 768]
        snake_case_ : Optional[int] = 4.0
        snake_case_ : Dict = 1E-6
        snake_case_ : Any = 0.95
    else:
        raise ValueError(f'''Size {size} not supported''' )
    # load image processor
    snake_case_ : Optional[Any] = PoolFormerImageProcessor(crop_pct=_UpperCamelCase )
    # Prepare image
    snake_case_ : Tuple = prepare_img()
    snake_case_ : str = image_processor(images=_UpperCamelCase , return_tensors='''pt''' ).pixel_values
    logger.info(f'''Converting model {model_name}...''' )
    # load original state dict
    snake_case_ : List[Any] = torch.load(_UpperCamelCase , map_location=torch.device('''cpu''' ) )
    # rename keys
    snake_case_ : Optional[int] = rename_keys(_UpperCamelCase )
    # create HuggingFace model and load state dict
    snake_case_ : str = PoolFormerForImageClassification(_UpperCamelCase )
    model.load_state_dict(_UpperCamelCase )
    model.eval()
    # Define image processor
    # NOTE(review): this re-creates the processor and pixel values computed above.
    snake_case_ : str = PoolFormerImageProcessor(crop_pct=_UpperCamelCase )
    snake_case_ : Any = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values
    # forward pass
    snake_case_ : List[Any] = model(_UpperCamelCase )
    snake_case_ : Union[str, Any] = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        snake_case_ : List[str] = torch.tensor([-0.3_045, -0.6_758, -0.4_869] )
    elif size == "s24":
        snake_case_ : Optional[int] = torch.tensor([0.4_402, -0.1_374, -0.8_045] )
    elif size == "s36":
        snake_case_ : Dict = torch.tensor([-0.6_080, -0.5_133, -0.5_898] )
    elif size == "m36":
        snake_case_ : Dict = torch.tensor([0.3_952, 0.2_263, -1.2_668] )
    elif size == "m48":
        snake_case_ : Dict = torch.tensor([0.1_167, -0.0_656, -0.3_423] )
    else:
        raise ValueError(f'''Size {size} not supported''' )
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3] , _UpperCamelCase , atol=1E-2 )
    # finally, save model and image processor
    logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
    Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
    model.save_pretrained(_UpperCamelCase )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
    # Command-line entry point for the PoolFormer checkpoint conversion.
    # NOTE(review): the parser is bound to ``lowerCAmelCase_`` but then read as
    # ``parser``/``args``, and ``convert_poolformer_checkpoint`` is not the name
    # the conversion function above was given — automated-rename artifacts that
    # must be restored for this script to run.
    lowerCAmelCase_ = argparse.ArgumentParser()
    parser.add_argument(
        '''--model_name''',
        default='''poolformer_s12''',
        type=str,
        help='''Name of the model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
    )
    lowerCAmelCase_ = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
lowerCAmelCase_ = logging.getLogger(__name__)
@dataclass
class __lowerCAmelCase :
    """A single example for token classification.

    NOTE(review): all three fields carry the same name ``lowerCamelCase_`` after
    an automated rename, so only the last annotation survives and the dataclass
    ends up with a single field. Later code reads ``example.guid``,
    ``example.words`` and ``example.labels``, so the fields were originally
    ``guid``, ``words`` and ``labels``.
    """

    lowerCamelCase_ : str  # originally ``guid``: unique id for the example
    lowerCamelCase_ : List[str]  # originally ``words``: the word sequence
    lowerCamelCase_ : Optional[List[str]]  # originally ``labels``: one label per word (None at inference)
@dataclass
class __lowerCAmelCase :
    """A single set of model-ready features for token classification.

    NOTE(review): all fields share the name ``lowerCamelCase_`` after an
    automated rename, so only the last annotation survives. The feature
    constructor below is called with ``input_ids``, ``attention_mask``,
    ``token_type_ids`` and ``label_ids`` — the intended field names.
    This class name also shadows the dataclass defined just above.
    """

    lowerCamelCase_ : List[int]  # originally ``input_ids``
    lowerCamelCase_ : List[int]  # originally ``attention_mask``
    lowerCamelCase_ : Optional[List[int]] = None  # originally ``token_type_ids``
    lowerCamelCase_ : Optional[List[int]] = None  # originally ``label_ids``
class __lowerCAmelCase ( _a ):
    """Dataset split identifiers (train / dev / test).

    Used below as ``Split.train`` with ``mode.value``, i.e. originally an
    ``Enum``. NOTE(review): the base ``_a`` is not defined in this file, all
    three members share the name ``lowerCamelCase_`` (only the last assignment
    survives), and this class name shadows the dataclasses above.
    """

    lowerCamelCase_ : str = '''train'''  # originally member ``train``
    lowerCamelCase_ : List[str] = '''dev'''  # originally member ``dev``
    lowerCamelCase_ : List[Any] = '''test'''  # originally member ``test``
class __lowerCAmelCase :
    """Helpers for token-classification (NER-style) data preparation.

    Subclasses implement the two abstract readers; the long static method
    tokenizes word/label pairs into fixed-length model inputs.

    NOTE(review): the signatures below repeat the parameter name
    ``__magic_name__`` (a SyntaxError) and locals are bound to ``snake_case_``
    while later statements read other names (``tokens``, ``label_ids``,
    ``input_ids`` ...) — artifacts of an automated rename.
    """
    @staticmethod
    def lowerCamelCase (__magic_name__ , __magic_name__ ) -> List[InputExample]:
        '''Read the examples of one split from a data dir. Abstract.'''
        raise NotImplementedError
    @staticmethod
    def lowerCamelCase (__magic_name__ ) -> List[str]:
        '''Return the label set of the task. Abstract.'''
        raise NotImplementedError
    @staticmethod
    def lowerCamelCase (__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=False , __magic_name__="[CLS]" , __magic_name__=1 , __magic_name__="[SEP]" , __magic_name__=False , __magic_name__=False , __magic_name__=0 , __magic_name__=0 , __magic_name__=-100 , __magic_name__=0 , __magic_name__=True , ) -> List[InputFeatures]:
        '''Convert examples to padded features: only the first sub-token of each
        word keeps its label id; remaining sub-tokens and padding positions get
        ``pad_token_label_id`` so the loss ignores them.'''
        snake_case_ : Optional[int] = {label: i for i, label in enumerate(__magic_name__ )}
        snake_case_ : Dict = []
        for ex_index, example in enumerate(__magic_name__ ):
            if ex_index % 1_0000 == 0:
                logger.info('''Writing example %d of %d''' , __magic_name__ , len(__magic_name__ ) )
            snake_case_ : List[str] = []
            snake_case_ : List[str] = []
            for word, label in zip(example.words , example.labels ):
                snake_case_ : Optional[Any] = tokenizer.tokenize(__magic_name__ )
                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(__magic_name__ ) > 0:
                    tokens.extend(__magic_name__ )
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(__magic_name__ ) - 1) )
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            snake_case_ : Union[str, Any] = tokenizer.num_special_tokens_to_add()
            if len(__magic_name__ ) > max_seq_length - special_tokens_count:
                snake_case_ : str = tokens[: (max_seq_length - special_tokens_count)]
                snake_case_ : Any = label_ids[: (max_seq_length - special_tokens_count)]
            # The convention in BERT is:
            # (a) For sequence pairs:
            # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
            # (b) For single sequences:
            # tokens: [CLS] the dog is hairy . [SEP]
            # type_ids: 0 0 0 0 0 0 0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            snake_case_ : Union[str, Any] = [sequence_a_segment_id] * len(__magic_name__ )
            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                snake_case_ : Union[str, Any] = [cls_token] + tokens
                snake_case_ : List[Any] = [pad_token_label_id] + label_ids
                snake_case_ : Optional[Any] = [cls_token_segment_id] + segment_ids
            snake_case_ : Optional[Any] = tokenizer.convert_tokens_to_ids(__magic_name__ )
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            snake_case_ : int = [1 if mask_padding_with_zero else 0] * len(__magic_name__ )
            # Zero-pad up to the sequence length.
            snake_case_ : Optional[int] = max_seq_length - len(__magic_name__ )
            if pad_on_left:
                snake_case_ : Optional[Any] = ([pad_token] * padding_length) + input_ids
                snake_case_ : Optional[int] = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                snake_case_ : Optional[Any] = ([pad_token_segment_id] * padding_length) + segment_ids
                snake_case_ : Dict = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length
            assert len(__magic_name__ ) == max_seq_length
            assert len(__magic_name__ ) == max_seq_length
            assert len(__magic_name__ ) == max_seq_length
            assert len(__magic_name__ ) == max_seq_length
            if ex_index < 5:
                logger.info('''*** Example ***''' )
                logger.info('''guid: %s''' , example.guid )
                logger.info('''tokens: %s''' , ''' '''.join([str(__magic_name__ ) for x in tokens] ) )
                logger.info('''input_ids: %s''' , ''' '''.join([str(__magic_name__ ) for x in input_ids] ) )
                logger.info('''input_mask: %s''' , ''' '''.join([str(__magic_name__ ) for x in input_mask] ) )
                logger.info('''segment_ids: %s''' , ''' '''.join([str(__magic_name__ ) for x in segment_ids] ) )
                logger.info('''label_ids: %s''' , ''' '''.join([str(__magic_name__ ) for x in label_ids] ) )
            if "token_type_ids" not in tokenizer.model_input_names:
                snake_case_ : int = None
            features.append(
                InputFeatures(
                    input_ids=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , label_ids=__magic_name__ ) )
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class __lowerCAmelCase ( _a ):
        """Torch ``Dataset`` of cached ``InputFeatures`` for token classification.

        Features are built once per (split, tokenizer, max-seq-length) triple and
        cached on disk under the data dir; a ``FileLock`` keeps distributed
        workers from racing on the cache file.

        NOTE(review): duplicate class-attribute names mean only the last one
        survives, and locals are bound to ``snake_case_`` but read under other
        names (``cached_features_file``) — rename artifacts.
        """

        lowerCamelCase_ : List[InputFeatures]  # cached feature list (read as ``self.features`` below)
        lowerCamelCase_ : int = nn.CrossEntropyLoss().ignore_index  # label id ignored by the loss
        def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__=False , __magic_name__ = Split.train , ) -> Union[str, Any]:
            '''Load features from the on-disk cache or build them from the raw split file.'''
            snake_case_ : List[str] = os.path.join(
                __magic_name__ , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(__magic_name__ ) ) , )
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            snake_case_ : Dict = cached_features_file + '''.lock'''
            with FileLock(__magic_name__ ):
                if os.path.exists(__magic_name__ ) and not overwrite_cache:
                    logger.info(F'''Loading features from cached file {cached_features_file}''' )
                    snake_case_ : Dict = torch.load(__magic_name__ )
                else:
                    logger.info(F'''Creating features from dataset file at {data_dir}''' )
                    snake_case_ : Any = token_classification_task.read_examples_from_file(__magic_name__ , __magic_name__ )
                    # TODO clean up all this to leverage built-in features of tokenizers
                    snake_case_ : int = token_classification_task.convert_examples_to_features(
                        __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__magic_name__ , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                    logger.info(F'''Saving features into cached file {cached_features_file}''' )
                    torch.save(self.features , __magic_name__ )
        def __len__(self ) -> Optional[Any]:
            '''Number of cached features.'''
            return len(self.features )
        def __getitem__(self , __magic_name__ ) -> InputFeatures:
            '''Return the i-th ``InputFeatures``.'''
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class __lowerCAmelCase :
        """TensorFlow dataset of ``InputFeatures`` for token classification.

        Builds the features eagerly in ``__init__`` and exposes them through a
        generator-backed ``tf.data.Dataset``.

        NOTE(review): duplicate class-attribute names mean only the last one
        survives, and locals are bound to ``snake_case_`` but read later as
        ``self.features``/``self.dataset`` — rename artifacts.
        """

        lowerCamelCase_ : List[InputFeatures]  # built feature list (read as ``self.features`` below)
        lowerCamelCase_ : int = -100  # label id ignored by the loss
        def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__=False , __magic_name__ = Split.train , ) -> Optional[int]:
            '''Read the split, convert it to features and wrap them in a ``tf.data.Dataset``.'''
            snake_case_ : Optional[int] = token_classification_task.read_examples_from_file(__magic_name__ , __magic_name__ )
            # TODO clean up all this to leverage built-in features of tokenizers
            snake_case_ : int = token_classification_task.convert_examples_to_features(
                __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__magic_name__ , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
            def gen():
                # Yield (inputs, labels) pairs matching the structures declared below.
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )
            if "token_type_ids" not in tokenizer.model_input_names:
                snake_case_ : Optional[Any] = tf.data.Dataset.from_generator(
                    __magic_name__ , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , (
                        {'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
                        tf.TensorShape([None] ),
                    ) , )
            else:
                snake_case_ : int = tf.data.Dataset.from_generator(
                    __magic_name__ , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , (
                        {
                            '''input_ids''': tf.TensorShape([None] ),
                            '''attention_mask''': tf.TensorShape([None] ),
                            '''token_type_ids''': tf.TensorShape([None] ),
                        },
                        tf.TensorShape([None] ),
                    ) , )
        def lowerCamelCase (self ) -> List[Any]:
            '''Return the dataset with its cardinality asserted against the feature count.'''
            snake_case_ : Optional[Any] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
            return self.dataset
        def __len__(self ) -> str:
            '''Number of built features.'''
            return len(self.features )
        def __getitem__(self , __magic_name__ ) -> InputFeatures:
            '''Return the i-th ``InputFeatures``.'''
            return self.features[i]
| 279 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __lowerCAmelCase ( _a ):
    """ViT-MSN model configuration (``model_type = "vit_msn"``).

    Holds the architecture hyper-parameters of a Vision Transformer trained
    with Masked Siamese Networks; all remaining keyword arguments are
    forwarded to the base configuration class.
    """

    # Model-type identifier consumed by the auto classes.
    lowerCamelCase_ : str = "vit_msn"

    def __init__(
        self,
        hidden_size=768,  # dimensionality of the encoder layers
        num_hidden_layers=12,  # number of transformer encoder layers
        num_attention_heads=12,  # attention heads per layer
        intermediate_size=3072,  # feed-forward hidden dimension
        hidden_act="gelu",  # activation in the feed-forward blocks
        hidden_dropout_prob=0.0,  # dropout on embeddings / encoder outputs
        attention_probs_dropout_prob=0.0,  # dropout on attention weights
        initializer_range=0.02,  # stddev for truncated-normal weight init
        layer_norm_eps=1e-06,  # epsilon used by LayerNorm
        image_size=224,  # input image resolution
        patch_size=16,  # patch edge length
        num_channels=3,  # number of input channels
        qkv_bias=True,  # whether Q/K/V projections use a bias
        **kwargs,
    ):
        """Store the hyper-parameters; extra kwargs go to ``super().__init__``."""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 279 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( _a, unittest.TestCase ):
    """Test suite for ``SpeechTaTokenizer`` (mixes in the common tokenizer tests).

    Uses the char-level SentencePiece fixture and registers a ``<mask>``
    special token plus a ``<ctc_blank>`` extra token in ``setUp``.

    NOTE(review): class attributes share the name ``lowerCamelCase_`` (only the
    last assignment survives) and locals are bound to ``snake_case_`` but read
    under other names (``tokenizer``, ``tokens`` ...) — rename artifacts.
    """

    lowerCamelCase_ : Optional[int] = SpeechTaTokenizer  # tokenizer class under test
    lowerCamelCase_ : int = False
    lowerCamelCase_ : Dict = True
    def lowerCamelCase (self ) -> Optional[Any]:
        '''Build a fixture tokenizer with <mask>/<ctc_blank> and save it to the tmp dir.'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        snake_case_ : Tuple = SpeechTaTokenizer(__magic_name__ )
        snake_case_ : Any = AddedToken('''<mask>''' , lstrip=__magic_name__ , rstrip=__magic_name__ )
        snake_case_ : int = mask_token
        tokenizer.add_special_tokens({'''mask_token''': mask_token} )
        tokenizer.add_tokens(['''<ctc_blank>'''] )
        tokenizer.save_pretrained(self.tmpdirname )
    def lowerCamelCase (self , __magic_name__ ) -> Dict:
        '''Return an (input, expected output) text pair for round-trip tests.'''
        snake_case_ : Dict = '''this is a test'''
        snake_case_ : int = '''this is a test'''
        return input_text, output_text
    def lowerCamelCase (self , __magic_name__ , __magic_name__=False , __magic_name__=20 , __magic_name__=5 ) -> List[Any]:
        '''Encode then decode the fixture pair; return the decoded text and the ids.'''
        snake_case_ , snake_case_ : int = self.get_input_output_texts(__magic_name__ )
        snake_case_ : Optional[Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
        snake_case_ : Any = tokenizer.decode(__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )
        return text, ids
    def lowerCamelCase (self ) -> Optional[int]:
        '''<pad> must round-trip through token<->id conversion as id 1.'''
        snake_case_ : List[str] = '''<pad>'''
        snake_case_ : Any = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
    def lowerCamelCase (self ) -> Any:
        '''Check the first/last vocab entries and the total vocab key count.'''
        snake_case_ : Any = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<s>''' )
        self.assertEqual(vocab_keys[1] , '''<pad>''' )
        self.assertEqual(vocab_keys[-4] , '''œ''' )
        self.assertEqual(vocab_keys[-2] , '''<mask>''' )
        self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' )
        self.assertEqual(len(__magic_name__ ) , 81 )
    def lowerCamelCase (self ) -> Dict:
        '''Base vocab size (without added tokens) must be 79.'''
        self.assertEqual(self.get_tokenizer().vocab_size , 79 )
    def lowerCamelCase (self ) -> Tuple:
        '''Adding regular and special tokens must grow the vocab and be used when encoding.'''
        snake_case_ : int = self.get_tokenizers(do_lower_case=__magic_name__ )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                snake_case_ : int = tokenizer.vocab_size
                snake_case_ : Optional[Any] = len(__magic_name__ )
                self.assertNotEqual(__magic_name__ , 0 )
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                snake_case_ : List[Any] = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
                snake_case_ : List[Any] = tokenizer.add_tokens(__magic_name__ )
                snake_case_ : Dict = tokenizer.vocab_size
                snake_case_ : Optional[Any] = len(__magic_name__ )
                self.assertNotEqual(__magic_name__ , 0 )
                self.assertEqual(__magic_name__ , __magic_name__ )
                self.assertEqual(__magic_name__ , len(__magic_name__ ) )
                self.assertEqual(__magic_name__ , all_size + len(__magic_name__ ) )
                snake_case_ : Union[str, Any] = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=__magic_name__ )
                self.assertGreaterEqual(len(__magic_name__ ) , 4 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                snake_case_ : Union[str, Any] = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
                snake_case_ : List[str] = tokenizer.add_special_tokens(__magic_name__ )
                snake_case_ : Dict = tokenizer.vocab_size
                snake_case_ : Dict = len(__magic_name__ )
                self.assertNotEqual(__magic_name__ , 0 )
                self.assertEqual(__magic_name__ , __magic_name__ )
                self.assertEqual(__magic_name__ , len(__magic_name__ ) )
                self.assertEqual(__magic_name__ , all_size_a + len(__magic_name__ ) )
                snake_case_ : Tuple = tokenizer.encode(
                    '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=__magic_name__ )
                self.assertGreaterEqual(len(__magic_name__ ) , 6 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[0] , tokens[1] )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokens[-4] )
                self.assertEqual(tokens[0] , tokenizer.eos_token_id )
                self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
    def lowerCamelCase (self ) -> Union[str, Any]:
        '''Intentionally disabled common test (not applicable to this tokenizer).'''
        pass
    def lowerCamelCase (self ) -> List[str]:
        '''Intentionally disabled common test (not applicable to this tokenizer).'''
        pass
    def lowerCamelCase (self ) -> int:
        '''Char-level tokenization, id conversion, and <unk> handling for out-of-vocab pieces.'''
        snake_case_ : Dict = self.get_tokenizer()
        snake_case_ : Optional[Any] = tokenizer.tokenize('''This is a test''' )
        # fmt: off
        self.assertListEqual(__magic_name__ , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] )
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__magic_name__ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
        snake_case_ : List[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            __magic_name__ , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
        snake_case_ : List[str] = tokenizer.convert_tokens_to_ids(__magic_name__ )
        # fmt: off
        self.assertListEqual(__magic_name__ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
        # fmt: on
        snake_case_ : int = tokenizer.convert_ids_to_tokens(__magic_name__ )
        self.assertListEqual(
            __magic_name__ , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
    @slow
    def lowerCamelCase (self ) -> Tuple:
        '''Integration test: encodings of three reference sentences must match the pinned ids/masks below.'''
        snake_case_ : Tuple = [
            '''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
            '''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
            '''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
            '''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
            '''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
            '''conditioning on both left and right context in all layers.''',
            '''The quick brown fox jumps over the lazy dog.''',
        ]
        # fmt: off
        snake_case_ : List[Any] = {
            '''input_ids''': [
                [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
                [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
            ],
            '''attention_mask''': [
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            ]
        }
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__magic_name__ , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=__magic_name__ , )
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    """Read the environment variable *key* and coerce it to a boolean.

    Args:
        key: Name of the environment variable to read.
        default: Value returned when the variable is not set.

    Returns:
        ``default`` when the variable is unset; otherwise ``True``/``False``
        parsed from the usual spellings (y/yes/t/true/on/1 vs n/no/f/false/off/0).

    Raises:
        ValueError: If the variable is set to an unrecognized value.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        return default
    # KEY is set: accept the same spellings `distutils.util.strtobool` did,
    # without depending on the deprecated (removed in 3.12) distutils package.
    value = value.lower()
    if value in ("y", "yes", "t", "true", "on", "1"):
        return True
    if value in ("n", "no", "f", "false", "off", "0"):
        return False
    raise ValueError(f"If set, {key} must be yes or no.")


# Module-level flag consulted by the `slow` test decorator below.
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
# NOTE(review): every decorator below was obfuscated to the same name
# `lowerCamelCase_`; each new `def` rebinds that module-level name, so only
# the last definition survives under it. The intended distinct names
# (skip, slow, require_cpu, ...) need restoring — TODO confirm upstream.
def lowerCamelCase_ ( _UpperCamelCase ) -> List[Any]:
    """Decorator that unconditionally skips the wrapped test."""
    return unittest.skip('''Test was skipped''' )(_UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase ) -> str:
    """Skip the wrapped test unless slow tests are enabled via RUN_SLOW.

    NOTE(review): reads `_run_slow_tests`, but the RUN_SLOW flag above is
    bound to the obfuscated name `lowerCAmelCase_` — confirm and restore.
    """
    return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(_UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase ) -> int:
    """Skip the wrapped test when a CUDA GPU is available (CPU-only test)."""
    return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(_UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase ) -> str:
    """Skip the wrapped test unless a CUDA GPU is available."""
    return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(_UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase ) -> Optional[int]:
    """Skip the wrapped test unless an XPU device is available."""
    return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(_UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase ) -> int:
    """Skip the wrapped test unless torch has `mps` backend support."""
    return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(_UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase ) -> List[Any]:
    """Skip the wrapped test unless both `transformers` and `datasets` are available."""
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(_UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase ) -> Optional[int]:
    """Skip the wrapped test unless `bitsandbytes` is available."""
    return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(_UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase ) -> Optional[Any]:
    """Skip the wrapped test unless a TPU is available."""
    return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(_UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase ) -> Any:
    """Skip the wrapped test unless exactly one CUDA GPU is visible."""
    return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(_UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase ) -> Tuple:
    """Skip the wrapped test unless exactly one XPU is visible."""
    return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(_UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase ) -> str:
    """Skip the wrapped test unless more than one CUDA GPU is visible."""
    return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(_UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase ) -> List[Any]:
    """Skip the wrapped test unless more than one XPU is visible."""
    return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(_UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase ) -> Tuple:
    """Skip the wrapped test unless the `safetensors` package is available."""
    return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(_UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase ) -> List[str]:
    """Skip the wrapped test unless DeepSpeed is available."""
    return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(_UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase ) -> str:
    """Skip the wrapped test unless the installed torch is >= 1.12.0."""
    return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(_UpperCamelCase )
def require_torch_min_version(test_case=None, version=None):
    """Decorator marking a test that requires torch >= *version*.

    Supports two usages:
      * ``@require_torch_min_version(version="1.12")`` — called with only a
        version, returns a decorator via :func:`functools.partial`;
      * direct application to the test case once ``version`` is bound.

    Args:
        test_case: The test function/class being decorated (``None`` on the
            parameterized first call).
        version: Minimum required torch version string.
    """
    if test_case is None:
        # Parameterized call: bind `version` and wait for the test case.
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)
def lowerCamelCase_ ( _UpperCamelCase ) -> Tuple:
    """Skip the wrapped test unless TensorBoard is available."""
    return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(_UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase ) -> str:
    """Skip the wrapped test unless wandb is available."""
    return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(_UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase ) -> Dict:
    """Skip the wrapped test unless comet_ml is available."""
    return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(_UpperCamelCase )
# True when at least one non-comet tracker (wandb or TensorBoard) can be used.
# NOTE(review): the decorator below reads `_atleast_one_tracker_available`,
# but obfuscation bound this value to `lowerCAmelCase_` — the original name
# must be restored for the decorator to work. TODO confirm upstream.
lowerCAmelCase_ = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowerCamelCase_ ( _UpperCamelCase ) -> Dict:
    """Skip the wrapped test unless a non-comet tracker is available."""
    return unittest.skipUnless(
        _atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(_UpperCamelCase )
class TempDirTestCase(unittest.TestCase):
    """TestCase that provides a shared temporary directory for its tests.

    A single temp dir is created once per class in ``setUpClass`` and removed
    in ``tearDownClass``. When ``clear_on_setup`` is True (the default), the
    directory's contents are wiped before every test so each test starts
    from an empty directory.

    NOTE(review): the obfuscated source named all three methods
    ``lowerCamelCase`` (never invoked by unittest) and bound the temp dir to
    a local instead of ``cls.tmpdir`` (which ``tearDownClass`` reads); the
    standard lifecycle names and attribute are restored here.
    """

    # When True, empty the temp dir before every test.
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        """Create the class-wide temporary directory."""
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        """Remove the class-wide temporary directory if it still exists."""
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        """Optionally clear the temp dir's contents before each test."""
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob('**/*'):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase(unittest.TestCase):
    """TestCase that resets accelerate's global singletons after every test.

    NOTE(review): the obfuscated source named this method ``lowerCamelCase``,
    which unittest never invokes; since it calls ``super().tearDown()`` it is
    restored as ``tearDown`` here. Relies on ``AcceleratorState`` and
    ``PartialState`` imported at the top of this file.
    """

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton so per-test
        # configuration does not leak into subsequent tests.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase):
    """TestCase that starts a set of mocks and stops them automatically.

    Pass a single mock or a tuple/list of mocks to ``setUp``; each is started
    immediately and its ``stop`` is registered as a cleanup so it is undone
    when the test finishes.

    NOTE(review): the obfuscated source had a duplicate parameter name
    (SyntaxError), referenced the undefined name ``mocks`` and bound the
    normalized list to a local while the loop read ``self.mocks``; all three
    are repaired here.
    """

    def setUp(self, mocks):
        # Normalize to a list so a single mock can be passed directly.
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    """Return True if *tensor* holds the same values on every process.

    The local tensor is gathered across processes and each gathered row is
    compared against the local copy.

    NOTE(review): the obfuscated source named the parameter
    ``_UpperCamelCase`` while the body read the undefined name ``tensor``;
    the parameter name is restored here. Relies on ``AcceleratorState`` and
    ``gather`` imported at the top of this file.
    """
    state = AcceleratorState()
    # Add a leading dim so `gather` stacks one row per process.
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class __lowerCAmelCase :
def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = returncode
snake_case_ : Union[str, Any] = stdout
snake_case_ : List[str] = stderr
async def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Dict:
"""simple docstring"""
while True:
snake_case_ : List[str] = await stream.readline()
if line:
callback(_UpperCamelCase )
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    """Run *cmd* asynchronously, teeing its stdout/stderr line by line.

    Args:
        cmd: Command and arguments as a list (``cmd[0]`` is the executable).
        env: Environment mapping for the child, or None to inherit.
        stdin: Standard input for the child process.
        timeout: Maximum seconds to wait for the output readers.
        quiet: When True, do not mirror captured lines to this process's
            stdout/stderr.
        echo: When True, print the command line before running it.

    Returns:
        A ``_RunOutput`` with the child's return code and its captured,
        decoded, right-stripped stdout/stderr lines.
    """
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to `p.communicate()`. The
    # trade-off is that no incremental output would be seen while it runs.
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        # Decode, buffer, and optionally mirror one line of child output.
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    """Synchronously run *cmd* via ``_stream_subprocess`` and check its exit code.

    Args:
        cmd: Command and arguments as a list.
        env: Environment for the child process, or None to inherit.
        stdin: Standard input for the child process.
        timeout: Seconds to wait for the output readers (default 180).
        quiet: Suppress mirroring of the child's output when True.
        echo: Print the command line before running when True (default).

    Returns:
        The ``_RunOutput`` produced by ``_stream_subprocess``.

    Raises:
        RuntimeError: If the command exits with a non-zero return code; the
            message includes the combined stderr of the child process.
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    return result
class SubprocessCallException(Exception):
    """Raised by `run_command` when a command exits with a non-zero status.

    NOTE(review): the obfuscated source inherited from the undefined name
    `_a`; `Exception` is used here so the class is actually raisable, as the
    `raise SubprocessCallException(...)` site below requires.
    """
def run_command(command, return_stdout=False):
    """Run *command* with `subprocess.check_output`, optionally returning stdout.

    Args:
        command: Command and arguments as a list (not a shell string).
        return_stdout: When True, return the captured stdout (decoded to
            UTF-8 when it is bytes); otherwise return None.

    Raises:
        SubprocessCallException: If the command exits with a non-zero
            status; the message includes the child's combined output.
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
| 279 |
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> List[str]:
"""simple docstring"""
print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' )
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if dist[i][j] != float('''inf''' ):
print(int(dist[i][j] ) , end='''\t''' )
else:
print('''INF''' , end='''\t''' )
print()
def floyd_warshall(graph, v):
    """All-pairs shortest paths via the Floyd-Warshall algorithm.

    Args:
        graph: v x v adjacency matrix; ``graph[i][j]`` is the weight of the
            edge from i to j, ``float("inf")`` when there is no edge.
        v: Number of vertices.

    Returns:
        Tuple ``(dist, v)`` where ``dist[i][j]`` is the length of the
        shortest path from i to j. The matrix is also printed to stdout.
    """

    def _show(dist):
        # Pretty-print the distance matrix, "INF" for unreachable pairs.
        print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
        for i in range(v):
            for j in range(v):
                print(int(dist[i][j]) if dist[i][j] != float("inf") else "INF", end="\t")
            print()

    # Start from the direct edge weights.
    dist = [[graph[i][j] for j in range(v)] for i in range(v)]
    # Relax every pair (i, j) through each intermediate vertex k.
    for k in range(v):
        for i in range(v):
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _show(dist)
    return dist, v
if __name__ == "__main__":
    # NOTE(review): this driver was obfuscated — every `input()` result is
    # bound to the same name `lowerCAmelCase_`, yet the code below reads
    # `v`, `e`, `graph`, `src`, `dst` and `weight`. As written it raises
    # NameError at runtime; the original variable names must be restored.
    lowerCAmelCase_ = int(input('''Enter number of vertices: '''))
    lowerCAmelCase_ = int(input('''Enter number of edges: '''))
    # v x v adjacency matrix, initialised to "no edge" (infinity).
    lowerCAmelCase_ = [[float('''inf''') for i in range(v)] for j in range(v)]
    for i in range(v):
        # distance from a vertex to itself is zero
        lowerCAmelCase_ = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print('''\nEdge ''', i + 1)
        lowerCAmelCase_ = int(input('''Enter source:'''))
        lowerCAmelCase_ = int(input('''Enter destination:'''))
        lowerCAmelCase_ = float(input('''Enter weight:'''))
        lowerCAmelCase_ = weight
    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 279 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.