| code (string, lengths 82 to 53.2k) | code_codestyle (int64, 0 to 721) | style_context (string, lengths 91 to 41.9k) | style_context_codestyle (int64, 0 to 699) | label (int64, 0 to 1) |
|---|---|---|---|---|
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)


DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now() -> str:
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    """Parse CLI args, run generation on the input file and, if a reference is given, compute metrics."""
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)

    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")

    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")

    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores


if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
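    # Usage for summarization (an illustrative invocation added for clarity; the
    # dataset paths and model name below are placeholders, not from the original script):
    # python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_summaries.txt \
    #     --reference_path $DATA_DIR/test.target --score_path $save_dir/test_rouge.json --task summarization --bs 16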
| 530 | """simple docstring"""
from __future__ import annotations
from typing import Any
class Matrix:
    """
    <class Matrix>
    Matrix structure.
    """

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            raise TypeError(f"Unsupported type given for another ({type(another)})")

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix):
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
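    # A minimal sanity check for the Sherman-Morrison update above (added for
    # illustration, not part of the original module): with A = I,
    # (A + uv^T) * sherman_morrison(A^-1, u, v) should print the identity matrix.
    def sherman_morrison_sanity_check() -> None:
        ainv = Matrix(3, 3, 0)
        a = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1  # A^(-1) = I
            a[i, i] = 1  # A = I
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        product = (a + u * v.transpose()) * ainv.sherman_morrison(u, v)
        print(f"(A + uv^T)(A + uv^T)^(-1) is {product}")

    sherman_morrison_sanity_check()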
| 530 | 1 |
'''simple docstring'''
deps = {
"""Pillow""": """Pillow""",
"""accelerate""": """accelerate>=0.11.0""",
"""compel""": """compel==0.1.8""",
"""black""": """black~=23.1""",
"""datasets""": """datasets""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.13.2""",
"""requests-mock""": """requests-mock==1.10.0""",
"""importlib_metadata""": """importlib_metadata""",
"""invisible-watermark""": """invisible-watermark""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2""",
"""jaxlib""": """jaxlib>=0.1.65""",
"""Jinja2""": """Jinja2""",
"""k-diffusion""": """k-diffusion>=0.0.12""",
"""torchsde""": """torchsde""",
"""note_seq""": """note_seq""",
"""librosa""": """librosa""",
"""numpy""": """numpy""",
"""omegaconf""": """omegaconf""",
"""parameterized""": """parameterized""",
"""protobuf""": """protobuf>=3.20.3,<4""",
"""pytest""": """pytest""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""ruff""": """ruff>=0.0.241""",
"""safetensors""": """safetensors""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""scipy""": """scipy""",
"""onnx""": """onnx""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""tensorboard""": """tensorboard""",
"""torch""": """torch>=1.4""",
"""torchvision""": """torchvision""",
"""transformers""": """transformers>=4.25.1""",
"""urllib3""": """urllib3<=2.0.0""",
}
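# Illustrative only (not part of the original file): a dependency table like
# this is typically consumed by a small helper that turns package names into
# pip requirement strings; the helper below is hypothetical.
#
#   def deps_list(*pkgs: str) -> list[str]:
#       return [deps[pkg] for pkg in pkgs]
#
#   deps_list("torch", "transformers")  # -> ["torch>=1.4", "transformers>=4.25.1"]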
| 617 |
'''simple docstring'''
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """
    Given two of the three carrier concentrations of a semiconductor, computes
    the third using the mass-action law n * p = n_i**2. Exactly one of the
    arguments must be 0 (the unknown to solve for).
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
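    # Illustrative calls (not part of the original module). By the mass-action
    # law n * p = n_i**2, supplying any two concentrations recovers the third:
    print(carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0))
    # -> ('intrinsic_conc', 50.0)
    print(carrier_concentration(electron_conc=0, hole_conc=1600, intrinsic_conc=200))
    # -> ('electron_conc', 25.0)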
| 617 | 1 |
import argparse

from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    tax_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
SCREAMING_SNAKE_CASE_: Tuple = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
SCREAMING_SNAKE_CASE_: Optional[Any] = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
SCREAMING_SNAKE_CASE_: Dict = "TransientGlobalSelfAttention"
else:
raise ValueError(
"Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
" attribute with a value from ['local', 'transient-global]." )
# Encoder
for layer_index in range(config.num_layers ):
SCREAMING_SNAKE_CASE_: List[Any] = f"layers_{str(_UpperCAmelCase )}"
# Self-Attention
SCREAMING_SNAKE_CASE_: Tuple = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
SCREAMING_SNAKE_CASE_: Union[str, Any] = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
SCREAMING_SNAKE_CASE_: Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
SCREAMING_SNAKE_CASE_: List[str] = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
SCREAMING_SNAKE_CASE_: Union[str, Any] = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
SCREAMING_SNAKE_CASE_: Dict = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
SCREAMING_SNAKE_CASE_: str = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
SCREAMING_SNAKE_CASE_: Optional[Any] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
SCREAMING_SNAKE_CASE_: Tuple = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
SCREAMING_SNAKE_CASE_: str = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
SCREAMING_SNAKE_CASE_: int = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
SCREAMING_SNAKE_CASE_: Tuple = flax_model.params["encoder"]["block"][str(_UpperCAmelCase )]["layer"]
SCREAMING_SNAKE_CASE_: List[Any] = tax_attention_key
SCREAMING_SNAKE_CASE_: Union[str, Any] = tax_attention_out
SCREAMING_SNAKE_CASE_: Any = tax_attention_query
SCREAMING_SNAKE_CASE_: int = tax_attention_value
SCREAMING_SNAKE_CASE_: Optional[Any] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
SCREAMING_SNAKE_CASE_: Union[str, Any] = tax_global_layer_norm
if split_mlp_wi:
SCREAMING_SNAKE_CASE_: Optional[int] = tax_mlp_wi_a
SCREAMING_SNAKE_CASE_: Tuple = tax_mlp_wi_a
else:
SCREAMING_SNAKE_CASE_: Optional[Any] = tax_mlp_wi
SCREAMING_SNAKE_CASE_: Tuple = tax_mlp_wo
SCREAMING_SNAKE_CASE_: Optional[Any] = tax_mlp_layer_norm
SCREAMING_SNAKE_CASE_: Optional[Any] = flax_model_encoder_layer_block
# Only for layer 0:
SCREAMING_SNAKE_CASE_: Dict = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
SCREAMING_SNAKE_CASE_: Union[str, Any] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
SCREAMING_SNAKE_CASE_: List[str] = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
SCREAMING_SNAKE_CASE_: Union[str, Any] = tax_encoder_global_rel_embedding
# Assigning
SCREAMING_SNAKE_CASE_: Optional[Any] = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
SCREAMING_SNAKE_CASE_: Tuple = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
SCREAMING_SNAKE_CASE_: Optional[int] = f"layers_{str(_UpperCAmelCase )}"
# Self-Attention
SCREAMING_SNAKE_CASE_: List[str] = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
SCREAMING_SNAKE_CASE_: Dict = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
SCREAMING_SNAKE_CASE_: List[str] = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
SCREAMING_SNAKE_CASE_: Union[str, Any] = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
SCREAMING_SNAKE_CASE_: Optional[int] = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
SCREAMING_SNAKE_CASE_: Any = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
SCREAMING_SNAKE_CASE_: Optional[int] = tax_enc_dec_attention_module["key"]["kernel"]
SCREAMING_SNAKE_CASE_: Any = tax_enc_dec_attention_module["out"]["kernel"]
SCREAMING_SNAKE_CASE_: Dict = tax_enc_dec_attention_module["query"]["kernel"]
SCREAMING_SNAKE_CASE_: int = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
SCREAMING_SNAKE_CASE_: str = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
SCREAMING_SNAKE_CASE_: int = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
SCREAMING_SNAKE_CASE_: List[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
SCREAMING_SNAKE_CASE_: List[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
SCREAMING_SNAKE_CASE_: Tuple = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
SCREAMING_SNAKE_CASE_: Dict = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
SCREAMING_SNAKE_CASE_: Tuple = flax_model.params["decoder"]["block"][str(_UpperCAmelCase )]["layer"]
SCREAMING_SNAKE_CASE_: Optional[int] = tax_attention_key
SCREAMING_SNAKE_CASE_: int = tax_attention_out
SCREAMING_SNAKE_CASE_: Optional[int] = tax_attention_query
SCREAMING_SNAKE_CASE_: List[str] = tax_attention_value
SCREAMING_SNAKE_CASE_: Any = tax_pre_attention_layer_norm
SCREAMING_SNAKE_CASE_: int = tax_enc_dec_attention_key
SCREAMING_SNAKE_CASE_: Dict = tax_enc_dec_attention_out
SCREAMING_SNAKE_CASE_: Tuple = tax_enc_dec_attention_query
SCREAMING_SNAKE_CASE_: Union[str, Any] = tax_enc_dec_attention_value
SCREAMING_SNAKE_CASE_: Union[str, Any] = tax_cross_layer_norm
if split_mlp_wi:
SCREAMING_SNAKE_CASE_: Any = tax_mlp_wi_a
SCREAMING_SNAKE_CASE_: Optional[int] = tax_mlp_wi_a
else:
SCREAMING_SNAKE_CASE_: Any = tax_mlp_wi
SCREAMING_SNAKE_CASE_: Union[str, Any] = tax_mlp_wo
SCREAMING_SNAKE_CASE_: Optional[Any] = txa_mlp_layer_norm
SCREAMING_SNAKE_CASE_: Union[str, Any] = flax_model_decoder_layer_block
# Decoder Normalization
SCREAMING_SNAKE_CASE_: Any = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
SCREAMING_SNAKE_CASE_: str = txa_decoder_norm
# Only for layer 0:
SCREAMING_SNAKE_CASE_: Tuple = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
SCREAMING_SNAKE_CASE_: List[str] = tax_decoder_rel_embedding
# Token Embeddings
SCREAMING_SNAKE_CASE_: str = tax_model["target"]["token_embedder"]["embedding"]
SCREAMING_SNAKE_CASE_: Tuple = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
SCREAMING_SNAKE_CASE_: Union[str, Any] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
flax_model.save_pretrained(_UpperCAmelCase )
print("T5X Model was sucessfully converted!" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path the T5X checkpoint."""
)
parser.add_argument("""--config_name""", default=None, type=str, required=True, help="""Config name of LongT5/T5 model.""")
parser.add_argument(
"""--flax_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output FLAX model."""
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
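    # Example invocation (illustrative; the checkpoint path and output folder are
    # placeholders, and the script filename is assumed):
    # python convert_t5x_checkpoint_to_flax.py --t5x_checkpoint_path /path/to/t5x/checkpoint \
    #     --config_name google/t5-v1_1-small --flax_dump_folder_path ./flax_dump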
| 671 |
import math


def is_prime(number: int) -> bool:
    """Checks to see if a number is a prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """
    Returns the side length of the square spiral for which the ratio of primes
    along both of its diagonals first falls below the given ratio.
    """
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
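    # Worked check (added for illustration): with ratio 0.5 the loop stops at
    # side length 11, where only 10 of the 21 diagonal numbers are prime
    # (10 / 21 ~ 0.476 < 0.5).
    assert solution(0.5) == 11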
| 671 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}


class DPTConfig(PretrainedConfig):
    model_type = "dpt"
def __init__( self , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.0_2 , lowercase=1e-12 , lowercase=384 , lowercase=16 , lowercase=3 , lowercase=False , lowercase=True , lowercase=[2, 5, 8, 11] , lowercase="project" , lowercase=[4, 2, 1, 0.5] , lowercase=[96, 192, 384, 768] , lowercase=256 , lowercase=-1 , lowercase=False , lowercase=True , lowercase=0.4 , lowercase=255 , lowercase=0.1 , lowercase=[1, 1024, 24, 24] , lowercase=[0, 1] , lowercase=None , **lowercase , ) -> Any:
super().__init__(**lowercase )
lowerCamelCase_ = hidden_size
lowerCamelCase_ = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info("Initializing the config with a `BiT` backbone." )
lowerCamelCase_ = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
}
lowerCamelCase_ = BitConfig(**lowercase )
elif isinstance(lowercase , lowercase ):
logger.info("Initializing the config with a `BiT` backbone." )
lowerCamelCase_ = BitConfig(**lowercase )
elif isinstance(lowercase , lowercase ):
lowerCamelCase_ = backbone_config
else:
raise ValueError(
f'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.' )
lowerCamelCase_ = backbone_featmap_shape
lowerCamelCase_ = neck_ignore_stages
if readout_type != "project":
raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode." )
else:
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = []
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = initializer_range
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = image_size
lowerCamelCase_ = patch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = qkv_bias
lowerCamelCase_ = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']" )
lowerCamelCase_ = readout_type
lowerCamelCase_ = reassemble_factors
lowerCamelCase_ = neck_hidden_sizes
lowerCamelCase_ = fusion_hidden_size
lowerCamelCase_ = head_in_index
lowerCamelCase_ = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
lowerCamelCase_ = use_auxiliary_head
lowerCamelCase_ = auxiliary_loss_weight
lowerCamelCase_ = semantic_loss_ignore_index
lowerCamelCase_ = semantic_classifier_dropout
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
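# Illustrative usage (not part of the original file):
#
#   from transformers import DPTConfig
#
#   config = DPTConfig()                       # plain ViT backbone
#   hybrid_config = DPTConfig(is_hybrid=True)  # initializes a default BiT backbone config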
| 313 |
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
if config.backbone not in timm.list_models():
raise ValueError(f'backbone {config.backbone} is not supported by timm.' )
if hasattr(lowercase , "out_features" ) and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
lowerCamelCase_ = getattr(lowercase , "use_pretrained_backbone" , lowercase )
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
# We just take the final layer by default. This matches the default for the transformers models.
lowerCamelCase_ = config.out_indices if getattr(lowercase , "out_indices" , lowercase ) is not None else (-1,)
lowerCamelCase_ = timm.create_model(
config.backbone , pretrained=lowercase , features_only=config.features_only , in_chans=config.num_channels , out_indices=lowercase , **lowercase , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
lowerCamelCase_ = self._backbone.return_layers
lowerCamelCase_ = {layer["module"]: str(lowercase ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(lowercase )
@classmethod
def SCREAMING_SNAKE_CASE_( cls , lowercase , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["vision", "timm"] )
from ...models.timm_backbone import TimmBackboneConfig
lowerCamelCase_ = kwargs.pop("config" , TimmBackboneConfig() )
lowerCamelCase_ = kwargs.pop("use_timm_backbone" , lowercase )
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones" )
lowerCamelCase_ = kwargs.pop("num_channels" , config.num_channels )
lowerCamelCase_ = kwargs.pop("features_only" , config.features_only )
lowerCamelCase_ = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
lowerCamelCase_ = kwargs.pop("out_indices" , config.out_indices )
lowerCamelCase_ = TimmBackboneConfig(
backbone=lowercase , num_channels=lowercase , features_only=lowercase , use_pretrained_backbone=lowercase , out_indices=lowercase , )
return super()._from_config(lowercase , **lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> List[Any]:
pass
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase=None , lowercase=None , lowercase=None , **lowercase ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
lowerCamelCase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCamelCase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCamelCase_ = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
lowerCamelCase_ = self._all_layers
lowerCamelCase_ = self._backbone(lowercase , **lowercase )
lowerCamelCase_ = self._return_layers
lowerCamelCase_ = tuple(hidden_states[i] for i in self.out_indices )
else:
lowerCamelCase_ = self._backbone(lowercase , **lowercase )
lowerCamelCase_ = None
lowerCamelCase_ = tuple(lowercase )
lowerCamelCase_ = tuple(lowercase ) if hidden_states is not None else None
if not return_dict:
lowerCamelCase_ = (feature_maps,)
if output_hidden_states:
lowerCamelCase_ = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=lowercase , hidden_states=lowercase , attentions=lowercase )
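# Illustrative usage (a sketch, not from the original file; the timm model name
# is a placeholder): wrap a timm model as a transformers backbone via its config.
#
#   from transformers import TimmBackboneConfig
#
#   config = TimmBackboneConfig(backbone="resnet50", features_only=True)
#   backbone = TimmBackbone(config)
#   feature_maps = backbone(pixel_values).feature_maps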
| 313 | 1 |
'''simple docstring'''
# flake8: noqa
# Lint as: python3
__all__ = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 310 |
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """
    Copy/paste/tweak the roberta_prelayernorm checkpoint into our RobertaPreLayerNorm structure.
    """
    # convert configuration
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint-repo",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
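    # Example invocation (illustrative; the script filename is assumed, and the
    # checkpoint is the one named in the help text above):
    # python convert_roberta_prelayernorm_checkpoint.py \
    #     --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 --pytorch_dump_folder_path ./converted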
| 310 | 1 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
__a = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def A_ ( ):
'''simple docstring'''
snake_case_ :Dict = _ask_options(
"""In which compute environment are you running?""", ["""This machine""", """AWS (Amazon SageMaker)"""], _convert_compute_environment, )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
snake_case_ :Optional[int] = get_sagemaker_input()
else:
snake_case_ :int = get_cluster_input()
return config
def A_ ( _lowercase=None ):
'''simple docstring'''
if subparsers is not None:
snake_case_ :Dict = subparsers.add_parser("""config""", description=_lowercase )
else:
snake_case_ :int = argparse.ArgumentParser("""Accelerate config command""", description=_lowercase )
parser.add_argument(
"""--config_file""", default=_lowercase, help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
), )
if subparsers is not None:
parser.set_defaults(func=_lowercase )
return parser
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :Optional[int] = get_user_input()
if args.config_file is not None:
snake_case_ :Dict = args.config_file
else:
if not os.path.isdir(_lowercase ):
os.makedirs(_lowercase )
snake_case_ :Dict = default_yaml_config_file
if config_file.endswith(""".json""" ):
config.to_json_file(_lowercase )
else:
config.to_yaml_file(_lowercase )
print(f"""accelerate configuration saved at {config_file}""" )
def A_ ( ):
'''simple docstring'''
snake_case_ :List[str] = config_command_parser()
snake_case_ :Tuple = parser.parse_args()
config_command(_lowercase )
if __name__ == "__main__":
main()
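# Example invocations (illustrative):
#   accelerate config                                  # interactive prompts, saved to the default location
#   accelerate config --config_file ./my_config.yaml   # save the answers to an explicit path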
| 310 |
"""simple docstring"""
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """
    Utility class for storing learned text embeddings for classifier free sampling.
    """

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation using VQ Diffusion.
    """

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )

    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
# get prompt text embeddings
snake_case_ :List[str] = self.tokenizer(
snake_case , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
snake_case_ :List[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
snake_case_ :int = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
snake_case_ :Optional[Any] = text_input_ids[:, : self.tokenizer.model_max_length]
snake_case_ :Any = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
snake_case_ :int = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=snake_case )
# duplicate text embeddings for each generation per prompt
snake_case_ :str = prompt_embeds.repeat_interleave(snake_case , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
snake_case_ :Optional[Any] = self.learned_classifier_free_sampling_embeddings.embeddings
snake_case_ :Any = negative_prompt_embeds.unsqueeze(0 ).repeat(snake_case , 1 , 1 )
else:
snake_case_ :Any = [""""""] * batch_size
snake_case_ :Optional[Any] = text_input_ids.shape[-1]
snake_case_ :Dict = self.tokenizer(
snake_case , padding="""max_length""" , max_length=snake_case , truncation=snake_case , return_tensors="""pt""" , )
snake_case_ :str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
snake_case_ :Union[str, Any] = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=snake_case )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
snake_case_ :Tuple = negative_prompt_embeds.shape[1]
snake_case_ :int = negative_prompt_embeds.repeat(1 , snake_case , 1 )
snake_case_ :int = negative_prompt_embeds.view(batch_size * num_images_per_prompt , snake_case , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case_ :str = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
if isinstance(snake_case , snake_case ):
snake_case_ :Any = 1
elif isinstance(snake_case , snake_case ):
snake_case_ :int = len(snake_case )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(snake_case )}""" )
snake_case_ :Tuple = batch_size * num_images_per_prompt
snake_case_ :Optional[Any] = guidance_scale > 1.0
snake_case_ :Dict = self._encode_prompt(snake_case , snake_case , snake_case )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(snake_case , snake_case ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(snake_case )}.""" )
# get the initial completely masked latents unless the user supplied it
snake_case_ :List[str] = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
snake_case_ :Tuple = self.transformer.num_vector_embeds - 1
snake_case_ :Optional[int] = torch.full(snake_case , snake_case ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"""Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
f""" {self.transformer.num_vector_embeds - 1} (inclusive).""" )
snake_case_ :str = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(snake_case , device=self.device )
snake_case_ :Optional[Any] = self.scheduler.timesteps.to(self.device )
snake_case_ :List[Any] = latents
for i, t in enumerate(self.progress_bar(snake_case ) ):
# expand the sample if we are doing classifier free guidance
snake_case_ :List[Any] = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
snake_case_ :Any = self.transformer(snake_case , encoder_hidden_states=snake_case , timestep=snake_case ).sample
if do_classifier_free_guidance:
snake_case_, snake_case_ :Optional[Any] = model_output.chunk(2 )
snake_case_ :Any = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(snake_case , dim=1 , keepdim=snake_case )
snake_case_ :str = self.truncate(snake_case , snake_case )
# remove `log(0)`'s (`-inf`s)
snake_case_ :List[str] = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
snake_case_ :Any = self.scheduler.step(snake_case , timestep=snake_case , sample=snake_case , generator=snake_case ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(snake_case , snake_case , snake_case )
snake_case_ :Optional[int] = self.vqvae.config.vq_embed_dim
snake_case_ :Tuple = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
snake_case_ :List[Any] = self.vqvae.quantize.get_codebook_entry(snake_case , shape=snake_case )
snake_case_ :Dict = self.vqvae.decode(snake_case , force_not_quantize=snake_case ).sample
snake_case_ :List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
snake_case_ :Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case_ :Any = self.numpy_to_pil(snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case )
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
snake_case_, snake_case_ :List[Any] = torch.sort(snake_case , 1 , descending=snake_case )
snake_case_ :Optional[int] = torch.exp(snake_case )
snake_case_ :int = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
snake_case_ :Union[str, Any] = torch.full_like(keep_mask[:, 0:1, :] , snake_case )
snake_case_ :List[str] = torch.cat((all_true, keep_mask) , dim=1 )
snake_case_ :List[str] = keep_mask[:, :-1, :]
snake_case_ :str = keep_mask.gather(1 , indices.argsort(1 ) )
snake_case_ :int = log_p_x_0.clone()
snake_case_ :List[Any] = -torch.inf # -inf = log(0)
return rv
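# Illustrative usage (a sketch, not part of the original file; the checkpoint
# name is assumed to be the reference VQ-Diffusion release):
#
#   from diffusers import VQDiffusionPipeline
#
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#   image = pipe("teddy bear playing in the pool").images[0]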
| 310 | 1 |
"""simple docstring"""
from ... import PretrainedConfig
_SCREAMING_SNAKE_CASE = {
"""sijunhe/nezha-cn-base""": """https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json""",
}
class __magic_name__ ( lowercase_ ):
_SCREAMING_SNAKE_CASE : int = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
_SCREAMING_SNAKE_CASE : int = 'nezha'
def __init__( self : Any , snake_case_ : int=21128 , snake_case_ : Tuple=768 , snake_case_ : Any=12 , snake_case_ : Tuple=12 , snake_case_ : Tuple=3072 , snake_case_ : Any="gelu" , snake_case_ : Any=0.1 , snake_case_ : int=0.1 , snake_case_ : Optional[int]=512 , snake_case_ : Any=64 , snake_case_ : Optional[Any]=2 , snake_case_ : str=0.02 , snake_case_ : Union[str, Any]=1e-12 , snake_case_ : str=0.1 , snake_case_ : Tuple=0 , snake_case_ : Dict=2 , snake_case_ : Any=3 , snake_case_ : Union[str, Any]=True , **snake_case_ : int , ):
super().__init__(pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ )
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = max_relative_position
__snake_case = type_vocab_size
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = classifier_dropout
__snake_case = use_cache
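# Illustrative usage (not part of the original file):
#
#   from transformers import NezhaConfig, NezhaModel
#
#   config = NezhaConfig()      # defaults mirroring sijunhe/nezha-cn-base
#   model = NezhaModel(config)  # randomly initialized model with that configuration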
| 163 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 685 | 0 |
"""simple docstring"""
from math import sqrt
def is_prime(number: int) -> bool:
    """
    input: positive integer 'number'
    returns true if 'number' is prime otherwise false.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status
def sieve_er(n: int) -> list[int]:
    """
    input: positive integer 'n' > 2
    returns a list of prime numbers from 2 up to n (sieve of erathostenes)
    """
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returns.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def get_prime_numbers(n: int) -> list[int]:
    """
    input: positive integer 'n' > 2
    returns a list of prime numbers from 2 up to n (naive primality test)
    """
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def prime_factorization(number: int) -> list[int]:
    """
    input: positive integer 'number'
    returns a list of the prime number factors of 'number'
    """
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def greatest_prime_factor(number: int) -> int:
    """
    input: integer 'number' >= 0
    returns the greatest prime number factor of 'number'
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number: int) -> int:
    """
    input: integer 'number' >= 0
    returns the smallest prime number factor of 'number'
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def is_even(number: int) -> bool:
    """
    input: integer 'number'
    returns true if 'number' is even, otherwise false.
    """
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare must been from type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    """
    input: integer 'number'
    returns true if 'number' is odd, otherwise false.
    """
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare must been from type bool"

    return number % 2 != 0
def __lowerCAmelCase (_UpperCamelCase ):
assert (
isinstance(_UpperCamelCase , _UpperCamelCase ) and (number > 2) and is_even(_UpperCamelCase )
), "'number' must been an int, even and > 2"
__lowerCAmelCase : int = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
__lowerCAmelCase : int = get_prime_numbers(_UpperCamelCase )
__lowerCAmelCase : str = len(_UpperCamelCase )
# run variable for while-loops.
__lowerCAmelCase : Union[str, Any] = 0
__lowerCAmelCase : Tuple = None
# exit variable. for break up the loops
__lowerCAmelCase : List[str] = True
while i < len_pn and loop:
__lowerCAmelCase : List[Any] = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
__lowerCAmelCase : Union[str, Any] = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(_UpperCamelCase , _UpperCamelCase )
and (len(_UpperCamelCase ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def gcd(number1: int, number2: int) -> int:
    """
    Greatest common divisor, computed with the euclidean algorithm.
    """
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1
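# Worked example (added for illustration): gcd(24, 40) walks the euclidean
# chain 24 % 40 = 24, 40 % 24 = 16, 24 % 16 = 8, 16 % 8 = 0 and returns 8.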
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
assert (
isinstance(_UpperCamelCase , _UpperCamelCase )
and isinstance(_UpperCamelCase , _UpperCamelCase )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
__lowerCAmelCase : List[Any] = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
__lowerCAmelCase : int = prime_factorization(_UpperCamelCase )
__lowerCAmelCase : Dict = prime_factorization(_UpperCamelCase )
elif numbera == 1 or numbera == 1:
__lowerCAmelCase : Union[str, Any] = []
__lowerCAmelCase : int = []
__lowerCAmelCase : Any = max(_UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase : Tuple = 0
__lowerCAmelCase : Any = 0
__lowerCAmelCase : int = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
__lowerCAmelCase : Union[str, Any] = prime_fac_a.count(_UpperCamelCase )
__lowerCAmelCase : Tuple = prime_fac_a.count(_UpperCamelCase )
for _ in range(max(_UpperCamelCase , _UpperCamelCase ) ):
ans *= n
else:
__lowerCAmelCase : Dict = prime_fac_a.count(_UpperCamelCase )
for _ in range(_UpperCamelCase ):
ans *= n
done.append(_UpperCamelCase )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
__lowerCAmelCase : int = prime_fac_a.count(_UpperCamelCase )
for _ in range(_UpperCamelCase ):
ans *= n
done.append(_UpperCamelCase )
# precondition
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def __lowerCAmelCase (_UpperCamelCase ):
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (n >= 0), "'number' must been a positive int"
__lowerCAmelCase : str = 0
__lowerCAmelCase : str = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(_UpperCamelCase ):
ans += 1
# precondition
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and is_prime(
_UpperCamelCase ), "'ans' must been a prime number and from type int"
return ans
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
assert (
is_prime(_UpperCamelCase ) and is_prime(_UpperCamelCase ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
__lowerCAmelCase : Dict = p_number_a + 1 # jump to the next number
__lowerCAmelCase : Any = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(_UpperCamelCase ):
number += 1
while number < p_number_a:
ans.append(_UpperCamelCase )
number += 1
# fetch the next prime number.
while not is_prime(_UpperCamelCase ):
number += 1
# precondition
assert (
isinstance(_UpperCamelCase , _UpperCamelCase )
and ans[0] != p_number_a
and ans[len(_UpperCamelCase ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def get_divisors(n: int) -> list[int]:
    """
    input: positive integer 'n' >= 1
    returns all divisors of n (inclusive 1 and 'n')
    """
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


def is_perfect_number(number: int) -> bool:
    """
    input: positive integer 'number' > 1
    returns true if 'number' is a perfect number otherwise false.
    """
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and > 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple[int, int]:
    """
    input: two integer 'numerator' and 'denominator', denominator != 0
    returns the simplified (numerator, denominator) pair.
    """
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def __lowerCAmelCase (_UpperCamelCase ):
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (n >= 0), "'n' must been a int and >= 0"
__lowerCAmelCase : Dict = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def __lowerCAmelCase (_UpperCamelCase ):
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (n >= 0), "'n' must been an int and >= 0"
__lowerCAmelCase : Optional[Any] = 0
__lowerCAmelCase : List[str] = 1
__lowerCAmelCase : str = 1 # this will be return
for _ in range(n - 1 ):
__lowerCAmelCase : Dict = ans
ans += fiba
__lowerCAmelCase : Optional[Any] = tmp
return ans | 549 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def __lowerCAmelCase (_UpperCamelCase ):
# here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') )
rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') )
rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') )
rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') )
rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"transformer.encoder.layers.{i}.self_attn.out_proj.weight",
F"encoder.layers.{i}.self_attn.out_proj.weight",
) )
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.bias", F"encoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"encoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"encoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"encoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"encoder.layers.{i}.fc2.bias") )
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.weight", F"encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.bias", F"encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm2.weight", F"encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"encoder.layers.{i}.final_layer_norm.bias") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"transformer.decoder.layers.{i}.self_attn.out_proj.weight",
F"decoder.layers.{i}.self_attn.out_proj.weight",
) )
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
F"decoder.layers.{i}.encoder_attn.out_proj.weight",
) )
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
F"decoder.layers.{i}.encoder_attn.out_proj.bias",
) )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"decoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"decoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"decoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"decoder.layers.{i}.fc2.bias") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.weight", F"decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.bias", F"decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.weight", F"decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.bias", F"decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm3.weight", F"decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"decoder.layers.{i}.final_layer_norm.bias") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
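# Illustrative aside (an addition, not original code): PyTorch's
# nn.MultiheadAttention keeps q/k/v as one fused (3*d, d) in_proj matrix, and
# the slicing above simply peels off the three (d, d) blocks (here d = 256).
# A toy check with d = 4:
#
#   import torch
#   d = 4
#   in_proj_weight = torch.arange(3 * d * d, dtype=torch.float32).reshape(3 * d, d)
#   q = in_proj_weight[:d, :]         # first block  -> query projection
#   k = in_proj_weight[d : 2 * d, :]  # middle block -> key projection
#   v = in_proj_weight[-d:, :]        # last block   -> value projection
#   assert torch.equal(torch.cat([q, k, v]), in_proj_weight)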
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original DETR weights into our DETR structure."""
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)

    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""detr-resnet-50""",
type=str,
choices=["""detr-resnet-50""", """detr-resnet-101"""],
help="""Name of the DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub or not.""")
    args = parser.parse_args()
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 549 | 1 |
from collections.abc import Callable


class Heap:
    """A generic heap; pass a key function to order items min- or max-first."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns parent index of given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns left-child index of given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns right-child index of given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Performs the changes required for swapping two elements in the heap."""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the two items using default comparison."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """Returns the index of a valid parent, per the desired ordering, among the given index and both its children."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in the upward direction of the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in the downward direction of the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        """Updates the given item's value in the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        """Deletes the given item from the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        # Move the last item into the vacated slot and re-index it.
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change - so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        """Returns the top [item, value] pair from the heap if present."""
        return self.arr[0] if self.size else None

    def extract_top(self):
        """Returns the top [item, value] pair and removes it from the heap if present."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    """
    >>> h = Heap()
    >>> h.insert_item(5, 34)
    >>> h.insert_item(6, 31)
    >>> h.insert_item(7, 37)
    >>> h.get_top()
    [6, 31]
    >>> h.extract_top()
    [6, 31]
    >>> h.extract_top()
    [5, 34]
    >>> h.extract_top()
    [7, 37]
    """


if __name__ == "__main__":
    import doctest

    doctest.testmod()
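# Usage sketch for the Heap above (names as reconstructed in this dump):
# h = Heap()                      # pass key=lambda x: -x for max-heap behaviour
# for item, value in [(1, 10), (2, 5), (3, 7)]:
#     h.insert_item(item, value)
# h.get_top()       # -> [2, 5], the item with the smallest value
# h.update_item(2, 20)
# h.extract_top()   # -> [3, 7] once item 2 has been re-scored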
| 637 |
'''simple docstring'''
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    """Rescale data into the [0, 1] range (min-max normalization)."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Rescale data to zero mean and unit (sample) standard deviation."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / sigma, ndigits) for x in data]
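# Worked example (function names as reconstructed above):
# normalization([2.0, 4.0, 6.0])   -> [0.0, 0.5, 1.0]
# standardization([2.0, 4.0, 6.0]) -> [-1.0, 0.0, 1.0]   (sample stdev = 2.0)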
| 71 | 0 |
import functools


def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Top-down (memoized) edit distance between word1 and word2."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if the first word index overflows - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if the second word index overflows - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
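# Quick check (function name as reconstructed above): the classic Levenshtein
# pair "intention" -> "execution" has edit distance 5.
# min_distance_up_bottom("intention", "execution")  # -> 5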
if __name__ == "__main__":
import doctest
doctest.testmod()
| 332 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DPTForDepthEstimation',
'DPTForSemanticSegmentation',
'DPTModel',
'DPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 332 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}


class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
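# Hypothetical usage sketch (assumes a real SentencePiece model file on disk;
# the class name is as reconstructed in this dump):
# tok = XGLMTokenizer("sentencepiece.bpe.model")
# ids = tok("Hello world")["input_ids"]
# The sep id (</s>) is prepended rather than appended, per
# build_inputs_with_special_tokens above, matching the original fairseq setup.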
| 123 |
'''simple docstring'''
def join(separator: str, separated: list[str]) -> str:
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
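# Quick check for the join above (note that, unlike str.join, the separator
# argument comes first here):
# join("-", ["a", "b", "c"])  # -> 'a-b-c'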
if __name__ == "__main__":
from doctest import testmod
testmod()
| 692 | 0 |
'''simple docstring'''
__all__ = [
"""Audio""",
"""Array2D""",
"""Array3D""",
"""Array4D""",
"""Array5D""",
"""ClassLabel""",
"""Features""",
"""Sequence""",
"""Value""",
"""Image""",
"""Translation""",
"""TranslationVariableLanguages""",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 603 |
'''simple docstring'''
def z_function(input_str: str) -> list[int]:
    """Computes the Z-array: z[i] is the length of the longest common prefix of s and s[i:]."""
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if the new index's result gives us a longer right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """Counts occurrences of `pattern` in `input_str` using the Z-array."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with the concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
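# Quick check for find_pattern above: "abr" occurs twice in "abracadabra".
# find_pattern("abr", "abracadabra")  # -> 2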
| 603 | 1 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Minimal stand-in so this module imports when vision isn't available."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
    def test_small_model_tf(self):
"""simple docstring"""
pass
@require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.7_235, 'label': 'cat', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}},
{'score': 0.7_218, 'label': 'remote', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}},
{'score': 0.7_184, 'label': 'couch', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}},
{'score': 0.6_748, 'label': 'remote', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}},
{'score': 0.6_656, 'label': 'cat', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}},
{'score': 0.6_614, 'label': 'couch', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}},
{'score': 0.6_456, 'label': 'remote', 'box': {'xmin': 4_9_4, 'ymin': 1_0_5, 'xmax': 5_2_1, 'ymax': 1_2_7}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 6_7, 'ymin': 2_7_4, 'xmax': 9_3, 'ymax': 2_9_7}},
{'score': 0.6_419, 'label': 'cat', 'box': {'xmin': 4_9_4, 'ymin': 1_0_5, 'xmax': 5_2_1, 'ymax': 1_2_7}},
] , )
        outputs = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] , threshold=0.64 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'score': 0.7_235, 'label': 'cat', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}},
{'score': 0.7_218, 'label': 'remote', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}},
{'score': 0.7_184, 'label': 'couch', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}},
{'score': 0.6_748, 'label': 'remote', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}},
{'score': 0.6_656, 'label': 'cat', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}},
{'score': 0.6_614, 'label': 'couch', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}},
{'score': 0.6_456, 'label': 'remote', 'box': {'xmin': 4_9_4, 'ymin': 1_0_5, 'xmax': 5_2_1, 'ymax': 1_2_7}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 6_7, 'ymin': 2_7_4, 'xmax': 9_3, 'ymax': 2_9_7}},
{'score': 0.6_419, 'label': 'cat', 'box': {'xmin': 4_9_4, 'ymin': 1_0_5, 'xmax': 5_2_1, 'ymax': 1_2_7}},
]
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}},
{'score': 0.2_537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 5_5, 'xmax': 3_1_5, 'ymax': 4_7_2}},
{'score': 0.1_474, 'label': 'remote', 'box': {'xmin': 3_3_5, 'ymin': 7_4, 'xmax': 3_7_1, 'ymax': 1_8_7}},
{'score': 0.1_208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_4_2, 'ymax': 4_7_6}},
] , )
        outputs = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}},
{'score': 0.2_537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 5_5, 'xmax': 3_1_5, 'ymax': 4_7_2}},
{'score': 0.1_474, 'label': 'remote', 'box': {'xmin': 3_3_5, 'ymin': 7_4, 'xmax': 3_7_1, 'ymax': 1_8_7}},
{'score': 0.1_208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_4_2, 'ymax': 4_7_6}},
],
[
{'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}},
{'score': 0.2_537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 5_5, 'xmax': 3_1_5, 'ymax': 4_7_2}},
{'score': 0.1_474, 'label': 'remote', 'box': {'xmin': 3_3_5, 'ymin': 7_4, 'xmax': 3_7_1, 'ymax': 1_8_7}},
{'score': 0.1_208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_4_2, 'ymax': 4_7_6}},
],
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
    def test_large_model_tf(self):
"""simple docstring"""
pass
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}},
{'score': 0.2_537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 5_5, 'xmax': 3_1_5, 'ymax': 4_7_2}},
] , )
@require_torch
@slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}},
] , )
| 101 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 275 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}


class Wav2Vec2Config(PretrainedConfig):
    model_type = "wav2vec2"
    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="sum", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, adapter_attn_dim=None, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
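# Illustrative helper (an addition, not part of the original config): the
# feature extractor defined by conv_kernel/conv_stride above downsamples a raw
# waveform following the usual 1-D convolution length formula, applied per layer.
def _feat_extract_output_length(input_length: int, kernel_sizes, strides) -> int:
    for kernel, stride in zip(kernel_sizes, strides):
        input_length = (input_length - kernel) // stride + 1
    return input_length


# With the defaults, one second of 16 kHz audio yields 49 frames:
# _feat_extract_output_length(16000, (10, 3, 3, 3, 3, 2, 2), (5, 2, 2, 2, 2, 2, 2)) -> 49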
| 705 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(self, image_size=224, patch_size=16, num_channels=3, num_frames=8, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-6, qkv_bias=True, attention_type="divided_space_time", drop_path_rate=0, **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
| 262 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self):
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self):
        return len(self) == 0
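# Quick usage sketch (names as reconstructed above):
# cll = CircularLinkedList()
# for value in (1, 2, 3):
#     cll.insert_tail(value)
# repr(cll)            # -> '1->2->3'
# cll.delete_front()   # -> 1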
def test_circular_linked_list() -> None:
    """Test cases for the CircularLinkedList class."""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 438 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(self, vocab_size=29056, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 438 | 1 |
"""simple docstring"""
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate sum(c * x**i) term by term; uses pow on every coefficient."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method: one multiply and add per coefficient."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
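# Worked example: for poly = (0.0, 0.0, 5.0, 9.3, 7.0) and x = 10.0, both
# routes give 5.0*10**2 + 9.3*10**3 + 7.0*10**4 = 500 + 9300 + 70000 = 79800.0.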
| 401 |
"""simple docstring"""
from math import pow, sqrt
def validate(*values: float) -> bool:
    """All inputs must be positive for Graham's law formulas to apply."""
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


# Graham's law: rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1)
def effusion_ratio(molar_mass_1: float, molar_mass_2: float):
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
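# Worked example under the reconstruction above: hydrogen (M = 2.016 g/mol)
# vs. oxygen (M = 31.998 g/mol) gives a rate ratio of
# sqrt(31.998 / 2.016) ~= 3.984, i.e. H2 effuses about 4x faster than O2.
# effusion_ratio(2.016, 31.998)  # -> 3.984324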
| 401 | 1 |
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
UpperCAmelCase__ = ""
UpperCAmelCase__ = ""
UpperCAmelCase__ = ""
UpperCAmelCase__ = ""
def _UpperCAmelCase ( __lowerCamelCase : str ) -> None:
_snake_case = tweepy.OAuthHandler(__UpperCAmelCase , __UpperCAmelCase )
auth.set_access_token(__UpperCAmelCase , __UpperCAmelCase )
_snake_case = tweepy.API(__UpperCAmelCase )
# initialize a list to hold all the tweepy Tweets
_snake_case = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_snake_case = api.user_timeline(screen_name=__UpperCAmelCase , count=2_00 )
# save most recent tweets
alltweets.extend(__UpperCAmelCase )
# save the id of the oldest tweet less one
_snake_case = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(__UpperCAmelCase ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
_snake_case = api.user_timeline(
screen_name=__UpperCAmelCase , count=2_00 , max_id=__UpperCAmelCase )
# save most recent tweets
alltweets.extend(__UpperCAmelCase )
# update the id of the oldest tweet less one
_snake_case = alltweets[-1].id - 1
print(f'''...{len(__UpperCAmelCase )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
_snake_case = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''' , '''w''' ) as f:
_snake_case = csv.writer(__UpperCAmelCase )
writer.writerow(['''id''', '''created_at''', '''text'''] )
writer.writerows(__UpperCAmelCase )
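
# Editorial note (assumes the tweepy 3.x API): tweepy.Cursor can replace the
# manual max_id pagination above, for example:
#   for status in tweepy.Cursor(api.user_timeline, screen_name=screen_name).items():
#       alltweets.append(status)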
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('FirePing32')
| 224 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock

import torch

from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config


# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)

EXCLUDE_EXAMPLES = [
    "cross_validation.py",
    "gradient_accumulation.py",
    "local_sgd.py",
    "multi_process_metrics.py",
    "memory.py",
    "automatic_gradient_accumulation.py",
    "fsdp_with_peak_mem_tracking.py",
    "deepspeed_with_config_support.py",
    "megatron_lm_gpt_pretraining.py",
]


class ExampleDifferenceTests(unittest.TestCase):
    """Checks that each `complete_*` example script covers its `by_feature` counterparts."""

    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            # Expected, harmless differences are stripped before comparing.
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
            " " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
            " " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
            " " * 20 + "\"epoch\": epoch,\n\n",
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)


@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
| 488 | 0 |
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import CLIPSegProcessor, ViTImageProcessor


@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images to feed to the processor."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # the processor must raise when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # the processor must raise when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 709 |
import unittest

from transformers import DonutProcessor


DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )

        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
| 203 | 0 |
"""simple docstring"""
a =0 # The first color of the flag.
a =1 # The second color of the flag.
a =2 # The third color of the flag.
a =(red, white, blue)
def lowerCamelCase_ ( __lowerCAmelCase ) -> list:
'''simple docstring'''
if not sequence:
return []
if len(__lowerCamelCase ) == 1:
return list(__lowerCamelCase )
lowerCamelCase__ =0
lowerCamelCase__ =len(__lowerCamelCase ) - 1
lowerCamelCase__ =0
while mid <= high:
if sequence[mid] == colors[0]:
lowerCamelCase__ , lowerCamelCase__ =sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
lowerCamelCase__ , lowerCamelCase__ =sequence[high], sequence[mid]
high -= 1
else:
lowerCamelCase__ =F'''The elements inside the sequence must contains only {colors} values'''
raise ValueError(__lowerCamelCase )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
a =input('Enter numbers separated by commas:\n').strip()
a =[int(item.strip()) for item in user_input.split(',')]
print(F'''{dutch_national_flag_sort(unsorted)}''')
| 530 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
UpperCAmelCase__ = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def _UpperCAmelCase ( ) -> Optional[Any]:
_snake_case = Github(os.environ['''GITHUB_TOKEN'''] )
_snake_case = g.get_repo('''huggingface/diffusers''' )
_snake_case = repo.get_issues(state='''open''' )
for issue in open_issues:
_snake_case = sorted(issue.get_comments() , key=lambda __lowerCamelCase : i.created_at , reverse=__lowerCamelCase )
_snake_case = comments[0] if len(__lowerCamelCase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
| 224 | 0 |
import os
from collections import deque

import torch
from torch.utils.data import Dataset


class CNNDMDataset(Dataset):
    """Abstracts the CNN/DailyMail dataset used to train seq2seq summarization models."""

    def __init__(self, path="", prefix="train"):
        """We initialize the class by listing all the documents to summarize."""
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        """Returns the number of documents."""
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
        story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines


def process_story(raw_story):
    """Split a CNN/DailyMail story file into article lines and highlight (summary) lines."""
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines


def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def truncate_or_pad(sequence, block_size, pad_token_id):
    """Adapt the sequence length to the block size: truncate or pad with pad_token_id."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    """Builds the attention mask: 1 for real tokens, 0 for padding."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Encode the story and summary lines, then flatten each into a single token sequence."""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    """Alternate the token type ids (segment embeddings) at each separator token."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
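
# Quick illustration (editorial, not in the original) of build_mask: padding
# positions get mask value 0, everything else gets 1.
#   build_mask(torch.tensor([5, 7, 0, 0]), pad_token_id=0)  ->  tensor([1, 1, 0, 0])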
| 376 |
"""Lempel-Ziv-Welch (LZW) decompression over a bit string read from a file."""
import math
import sys


def read_file_binary(file_path: str) -> str:
    """Read the given file as bytes and return them as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompress data_bits with LZW, growing the lexicon as codes get wider."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        if math.log2(index).is_integer():
            # The number of entries hit a power of two: every code is now one
            # bit wider, so every existing key gains a leading "0".
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the bit string to_write (only 0's and 1's) as bytes into the file."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Remove the size prefix that a compressed file carries and return the rest."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """Read the source file, decompress it, and write the result to the destination file."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
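
# Tiny check (editorial, assumes the size prefix has already been stripped):
# with the initial lexicon {"0": "0", "1": "1"}, a single input bit decodes to
# itself, so decompress_data("1") == "1".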
| 376 | 1 |
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
    },
    "added_tokens.json": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
    },
    "merges_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1024,
}


class MvpTokenizerFast(PreTrainedTokenizerFast):
    """Fast (Rust-backed) MVP tokenizer, built on a GPT-2-style byte-level BPE."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """Mask token; logs an error if used while not having been set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it,
        # so we set lstrip to True.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
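
# Usage sketch (editorial; assumes network access to the Hub checkpoint above):
#   tok = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
#   tok("Hello world")["input_ids"]
# Because this is a byte-level BPE, `add_prefix_space` changes how the first
# word of a sentence is tokenized.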
| 590 |
from typing import List, Optional, TypeVar

from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal


logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several datasets (sources) into a single dataset."""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Concatenate several datasets into a single dataset."""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
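
# Minimal usage sketch (editorial; the datasets below are illustrative):
#   from datasets import Dataset
#   d1 = Dataset.from_dict({"a": [0, 1, 2]})
#   d2 = Dataset.from_dict({"a": [10, 11, 12]})
#   interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)  # stochastic mix
#   concatenate_datasets([d1, d2])                                    # rows of d1 then d2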
| 105 | 0 |
"""Count integer partitions with a bottom-up dynamic-programming table."""


def partition(m: int) -> int:
    """Return p(m), the number of ways to write m as a sum of positive integers."""
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
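
# Worked example (editorial): partition(5) == 7, counting
# 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1.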
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
lowercase__ =int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
lowercase__ =int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
| 511 |
"""Convert an image to its photographic negative."""
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img
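
# Editorial note: with NumPy the double loop collapses to the vectorized
# expression `255 - img`, which is much faster on large images.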
if __name__ == "__main__":
# read original image
lowercase__ =imread('image_data/lena.jpg', 1)
# convert to its negative
lowercase__ =convert_to_negative(img)
# show result image
imshow('negative of original image', img)
waitKey(0)
destroyAllWindows()
| 511 | 1 |
"""Report how many CUDA GPUs torch can see on this machine."""
import torch


def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
| 26 |
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 478 | 0 |
"""Generate Proth numbers: k * 2**n + 1 with odd k < 2**n."""
import math


def proth(number: int) -> int:
    """Return the `number`-th Proth number (1-indexed)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Numbers are generated block by block; block `b` contributes values of
        # the form 2 ** (b + 1) plus an earlier Proth number.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]
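
# For orientation (editorial): the sequence begins 3, 5, 9, 13, 17, 25, 33, 41, ...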
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(1_1):
__A : Tuple = 0
try:
__A : Optional[int] = proth(number)
except ValueError:
print(f'''ValueError: there is no {number}th Proth number''')
continue
print(f'''The {number}th Proth number: {value}''')
| 713 |
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 450 | 0 |
"""Lazy import structure for the ViT model."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 41 |
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def lowercase__ ( lowerCAmelCase__ : int ) -> int:
'''simple docstring'''
a__ : Tuple = prime_factors(lowerCAmelCase__ )
if is_square_free(lowerCAmelCase__ ):
return -1 if len(lowerCAmelCase__ ) % 2 else 1
return 0
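
# A few values for orientation (editorial): mobius(1) == 1, mobius(4) == 0
# (4 = 2 * 2 is not square-free), mobius(6) == 1, mobius(30) == -1.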
if __name__ == "__main__":
import doctest
doctest.testmod() | 642 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=4 , ) -> Optional[Any]:
A_ = parent
A_ = batch_size
A_ = seq_length
A_ = is_training
A_ = use_attention_mask
A_ = use_token_type_ids
A_ = use_labels
A_ = vocab_size
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = type_vocab_size
A_ = type_sequence_label_size
A_ = initializer_range
A_ = num_choices
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest( FlaxModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
    test_head_masking = True
    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        self.model_tester = FlaxBertModelTester(self )
    @slow
    def test_model_from_pretrained( self ):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained('''bert-base-cased''' )
        outputs = model(np.ones((1, 1) ) )
        self.assertIsNotNone(outputs )
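# A minimal sketch (hypothetical, not part of the original test suite) of how the
# tester above can be exercised outside of unittest discovery; ``parent`` is only
# used for assertions, so ``None`` is fine when calling the prepare helpers directly:
#
#   tester = FlaxBertModelTester(parent=None)
#   config, input_ids, token_type_ids, attention_mask = tester.prepare_config_and_inputs()
#   model = FlaxBertModel(config)
#   outputs = model(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)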
| 174 | '''simple docstring'''
from collections.abc import Iterable
from typing import Any
class Node:
    '''A node of a binary search tree.'''
    def __init__( self , value = None ) -> None:
        self.value = value
        self.parent = None  # Added in order to delete a node easier
        self.left = None
        self.right = None
    def __repr__( self ) -> str:
        from pprint import pformat
        if self.left is None and self.right is None:
            return str(self.value )
        return pformat({F'''{self.value}''': (self.left, self.right)} , indent=1 )
class BinarySearchTree:
    '''A binary search tree built from ``Node`` objects.'''
    def __init__( self , root = None ) -> None:
        self.root = root
    def __str__( self ) -> str:
        return str(self.root )
    def __reassign_nodes( self , node , new_children ) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node ):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children
    def is_right( self , node ) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False
    def empty( self ) -> bool:
        return self.root is None
    def __insert( self , value ) -> None:
        new_node = Node(value )  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node
    def insert( self , *values ) -> None:
        for value in values:
            self.__insert(value )
    def search( self , value ) -> Node | None:
        if self.empty():
            raise IndexError('''Warning: Tree is empty! Please insert values before searching.''' )
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType attribute errors
            while node is not None and node.value != value:
                node = node.left if value < node.value else node.right
            return node
    def get_max( self , node = None ) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node
    def get_min( self , node = None ) -> Node | None:
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node
    def remove( self , value ) -> None:
        node = self.search(value )  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node , None )
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node , node.right )
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node , node.left )
            else:
                tmp_node = self.get_max(
                    node.left )  # Gets the max value of the left branch
                self.remove(tmp_node.value )  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keeps tree structure
    def preorder_traverse( self , node ) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left )
            yield from self.preorder_traverse(node.right )
    def traversal_tree( self , traversal_function = None ) -> Any:
        if traversal_function is None:
            return self.preorder_traverse(self.root )
        else:
            return traversal_function(self.root )
    def inorder( self , arr , node ) -> None:
        if node:
            self.inorder(arr , node.left )
            arr.append(node.value )
            self.inorder(arr , node.right )
    def find_kth_smallest( self , k , node ) -> int:
        arr: list = []
        self.inorder(arr , node )  # append all values to list using inorder traversal
        return arr[k - 1]
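# Worked example for the method above: for a tree built from (8, 3, 6, 1, 10), the
# inorder traversal yields [1, 3, 6, 8, 10], so find_kth_smallest(3, tree.root)
# returns 6.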
def postorder( curr_node : Node | None ) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
    return node_list
def binary_search_tree( ) -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i )
    # Prints all the elements of the list in order traversal
    print(t )
    if t.search(6 ) is not None:
        print('''The value 6 exists''' )
    else:
        print('''The value 6 doesn\'t exist''' )
    if t.search(-1 ) is not None:
        print('''The value -1 exists''' )
    else:
        print('''The value -1 doesn\'t exist''' )
    if not t.empty():
        print('''Max Value: ''', t.get_max().value )  # type: ignore
        print('''Min Value: ''', t.get_min().value )  # type: ignore
    for i in testlist:
        t.remove(i )
    print(t )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 174 | 1 |
"""simple docstring"""
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 104 |
"""simple docstring"""
class PrefixSum:
    '''Answers range-sum queries in O(1) after an O(n) prefix-sum precomputation.'''
    def __init__( self , array ):
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum( self , start , end ):
        '''Sum of ``array[start:end + 1]``.'''
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum( self , target_sum ):
        '''Whether some contiguous subarray sums to ``target_sum``.'''
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
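# A minimal usage sketch of the class above:
# >>> ps = PrefixSum([1, 2, 3])
# >>> ps.get_sum(0, 2)     # 1 + 2 + 3
# 6
# >>> ps.contains_sum(5)   # the contiguous subarray [2, 3] sums to 5
# True
# >>> ps.contains_sum(7)
# False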
if __name__ == "__main__":
import doctest
doctest.testmod()
| 260 | 0 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
    },
    'merges_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'allenai/led-base-16384': 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode( ):
    '''Returns a mapping from utf-8 byte values to printable unicode characters.'''
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs( word ):
    '''Return the set of adjacent symbol pairs in a word, given as a tuple of symbols.'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class LEDTokenizer( PreTrainedTokenizer ):
    '''Constructs a LED tokenizer, derived from the GPT-2 byte-level BPE tokenizer.'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs, ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False ) if isinstance(bos_token, str ) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False ) if isinstance(eos_token, str ) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False ) if isinstance(sep_token, str ) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False ) if isinstance(cls_token, str ) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False ) if isinstance(unk_token, str ) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False ) if isinstance(pad_token, str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False ) if isinstance(mask_token, str ) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )
        with open(vocab_file, encoding='''utf-8''' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='''utf-8''' ) as merges_handle:
            bpe_merges = merges_handle.read().split('''\n''' )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size( self ):
        return len(self.encoder )
    def get_vocab( self ):
        return dict(self.encoder, **self.added_tokens_encoder )
    def bpe( self, token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first, i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ''' '''.join(word )
        self.cache[token] = word
        return word
    def _tokenize( self, text ):
        bpe_tokens = []
        for token in re.findall(self.pat, text ):
            token = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(''' ''' ) )
        return bpe_tokens
    def _convert_token_to_id( self, token ):
        return self.encoder.get(token, self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self, index ):
        return self.decoder.get(index )
    def convert_tokens_to_string( self, tokens ):
        text = ''''''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''', errors=self.errors )
        return text
    def save_vocabulary( self, save_directory, filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merge_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(vocab_file, '''w''', encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False ) + '''\n''' )
        index = 0
        with open(merge_file, '''w''', encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self, token_ids_0, token_ids_1 = None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self, token_ids_0, token_ids_1 = None, already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self, token_ids_0, token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self, text, is_split_into_words = False, **kwargs ):
        add_prefix_space = kwargs.pop('''add_prefix_space''', self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ''' ''' + text
        return (text, kwargs)
    def _pad( self, encoded_inputs, max_length = None, padding_strategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of = None, return_attention_mask = None, ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = '''attention_mask''' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['''global_attention_mask'''] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs['''global_attention_mask'''] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['''global_attention_mask'''] = (
                        encoded_inputs['''global_attention_mask'''] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['''global_attention_mask'''] = [-1] * difference + encoded_inputs[
                        '''global_attention_mask'''
                    ]
                else:
                    raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
        return encoded_inputs
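# A minimal usage sketch (hedged: file paths are illustrative, and the files must
# be in the GPT-2/BART BPE format that this tokenizer expects):
#
#   tokenizer = LEDTokenizer(vocab_file="vocab.json", merges_file="merges.txt")
#   encoding = tokenizer("Summarize this document.")
#   text = tokenizer.decode(encoding["input_ids"])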
| 215 |
"""simple docstring"""
def jaro_winkler( stra , strb ):
    '''Compute the Jaro-Winkler similarity of two strings.'''
    def get_matched_characters( _stra , _strb ) -> str:
        matched = []
        limit = min(len(_stra ) , len(_strb ) ) // 2
        for i, l in enumerate(_stra ):
            left = int(max(0 , i - limit ) )
            right = int(min(i + limit + 1 , len(_strb ) ) )
            if l in _strb[left:right]:
                matched.append(l )
                _strb = f'{_strb[0:_strb.index(l )]} {_strb[_strb.index(l ) + 1:]}'
        return "".join(matched )
    # matching characters
    matching_a = get_matched_characters(stra , strb )
    matching_b = get_matched_characters(strb , stra )
    match_count = len(matching_a )
    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a , matching_b ) if ca != cb] ) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra )
                + match_count / len(strb )
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4] , strb[:4] ):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
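# Worked example for the demo call below: between "hello" and "world" only the
# single "l" matches inside the search window, so jaro = (1/5 + 1/5 + 1/1) / 3
# = 0.4666..., and with no common prefix the Winkler boost leaves it unchanged.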
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
| 215 | 1 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    """A node of a circular linked list."""
    def __init__( self , data ):
        self.data = data
        self.next = None
class CircularLinkedList:
    """A singly linked list whose tail node points back at the head."""
    def __init__( self ):
        self.head = None
        self.tail = None
    def __iter__( self ):
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break
    def __len__( self ):
        return sum(1 for _ in self )
    def __repr__( self ):
        return "->".join(str(item ) for item in iter(self ) )
    def insert_tail( self , data ):
        self.insert_nth(len(self ) , data )
    def insert_head( self , data ):
        self.insert_nth(0 , data )
    def insert_nth( self , index , data ):
        if index < 0 or index > len(self ):
            raise IndexError('''list index out of range.''' )
        new_node = Node(data )
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self ) - 1:  # insert at tail
                self.tail = new_node
    def delete_front( self ):
        return self.delete_nth(0 )
    def delete_tail( self ):
        return self.delete_nth(len(self ) - 1 )
    def delete_nth( self , index = 0 ):
        if not 0 <= index < len(self ):
            raise IndexError('''list index out of range.''' )
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self ) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data
    def is_empty( self ):
        return len(self ) == 0
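# A minimal usage sketch of the class above:
# >>> cll = CircularLinkedList()
# >>> cll.insert_tail(1); cll.insert_tail(2); cll.insert_head(0)
# >>> repr(cll)
# '0->1->2'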
def test_circular_linked_list( ) -> None:
    """Smoke-test every operation of ``CircularLinkedList``."""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list ) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list ) == ""
    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_nth(-1 )
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0 )
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True
    for i in range(5 ):
        assert len(circular_linked_list ) == i
        circular_linked_list.insert_nth(i , i + 1 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    circular_linked_list.insert_tail(6 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 7 ) )
    circular_linked_list.insert_head(0 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(0 , 7 ) )
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    assert circular_linked_list.delete_nth(2 ) == 3
    circular_linked_list.insert_nth(2 , 3 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 74 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert( src_path : str , map_location : str = "cpu" , save_path : Union[str, None] = None ) -> None:
    '''Convert a saved state dict of fp32 tensors to fp16, roughly halving its size on disk.'''
    state_dict = torch.load(src_path , map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v , torch.Tensor ):
            raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' )
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path )
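# Invoked from the command line via python-fire; the script filename below is
# illustrative, and the arguments map onto the parameters of ``convert``:
#   python convert_model_to_fp16.py pytorch_model.bin --save_path pytorch_model_fp16.bin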
if __name__ == "__main__":
fire.Fire(convert)
| 484 | 0 |
from math import ceil
def solution( n : int = 1_001 ):
    '''Sum of the numbers on the diagonals of an n x n spiral (Project Euler problem 28).'''
    total = 1
    for i in range(1 , int(ceil(n / 2.0 ) ) ):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
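# Worked check for a 5 x 5 spiral: the diagonals hold 1, 3, 5, 7, 9, 13, 17, 21, 25,
# which sum to 101, and solution(5) reproduces it as 1 + (4*9 - 12) + (4*25 - 24) = 101.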
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number')
| 230 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowercase_ = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    '''Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.'''
    model_name_or_path: str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    config_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    task_type: Optional[str] = field(
        default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    use_fast: bool = field(default=False , metadata={"""help""": """Set this flag to use fast tokenization."""} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class DataTrainingArguments:
    '''Arguments pertaining to what data we are going to input our model for training and eval.'''
    data_dir: str = field(
        metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
    labels: Optional[str] = field(
        default=None , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
    max_seq_length: int = field(
        default=128 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
    module = import_module('tasks' )
    try:
        token_classification_task_clazz = getattr(module , model_args.task_type )
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f'Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '
            f'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' , training_args )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels )
    label_map: Dict[int, str] = dict(enumerate(labels ) )
    num_labels = len(labels )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label=label_map , label2id={label: i for i, label in enumerate(labels )} , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions , label_ids ) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions , axis=2 )
        batch_size , seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size )]
        preds_list = [[] for _ in range(batch_size )]
        for i in range(batch_size ):
            for j in range(seq_len ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds_list , out_label_list = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(out_label_list , preds_list ),
            "precision": precision_score(out_label_list , preds_list ),
            "recall": recall_score(out_label_list , preds_list ),
            "f1": f1_score(out_label_list , preds_list ),
        }
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results.txt' )
        if trainer.is_world_process_zero():
            with open(output_eval_file , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key, value in result.items():
                    logger.info(' %s = %s' , key , value )
                    writer.write('%s = %s\n' % (key, value) )
            results.update(result )
# Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
        predictions , label_ids , metrics = trainer.predict(test_dataset )
        preds_list , _ = align_predictions(predictions , label_ids )
        output_test_results_file = os.path.join(training_args.output_dir , 'test_results.txt' )
        if trainer.is_world_process_zero():
            with open(output_test_results_file , 'w' ) as writer:
                for key, value in metrics.items():
                    logger.info(' %s = %s' , key , value )
                    writer.write('%s = %s\n' % (key, value) )
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir , 'test_predictions.txt' )
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file , 'w' ) as writer:
                with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
                    token_classification_task.write_predictions_to_file(writer , f , preds_list )
return results
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
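# Typical invocation (values illustrative; any field of the three dataclasses above
# can be passed as a --flag, or everything can come from a single JSON config file):
#   python run_ner.py --model_name_or_path bert-base-cased --data_dir ./conll2003 \
#       --output_dir ./ner-model --do_train --do_eval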
if __name__ == "__main__":
main()
| 230 | 1 |
def binary_xor(a : int , b : int ) -> str:
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
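# Worked example: binary_xor(25, 32) compares 011001 with 100000 bit by bit and
# returns "0b111001" (i.e. 57, since 25 ^ 32 == 57).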
if __name__ == "__main__":
import doctest
doctest.testmod()
| 206 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    'vocab_size': len(tokenizer),
    'scale_attn_by_inverse_layer_idx': True,
    'reorder_and_upcast_attn': True,
}
# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
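# Example invocation (script filename and values illustrative; the flag names come
# from the InitializationArguments dataclass consumed above):
#   python initialize_model.py --config_name gpt2-large \
#       --tokenizer_name codeparrot/codeparrot --model_name my-codeparrot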
| 206 | 1 |
from __future__ import annotations
class XORCipher:
    '''Implements the symmetric XOR cipher on strings and text files.'''
    def __init__( self , key = 0 ):
        # a key of 0 means "no default key was chosen"
        self.__key = key
    def encrypt( self , content , key ) -> list[str]:
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]
    def decrypt( self , content , key ) -> list[str]:
        assert isinstance(key , int ) and isinstance(content , list )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]
    def encrypt_string( self , content , key = 0 ) -> str:
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans
    def decrypt_string( self , content , key = 0 ) -> str:
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans
    def encrypt_file( self , file , key = 0 ) -> bool:
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open("encrypt.out" , "w+" ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line , key ) )
        except OSError:
            return False
        return True
    def decrypt_file( self , file , key ) -> bool:
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open("decrypt.out" , "w+" ) as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line , key ) )
        except OSError:
            return False
        return True
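# XOR is its own inverse, so decrypt_string(encrypt_string(s, key), key) == s for
# any key in 1..255; the commented smoke tests below rely on exactly that property.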
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful") | 381 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    '''Builds small BlipText configs and inputs for the tests below.'''
    def __init__( self , parent , batch_size=12 , seq_length=7 , is_training=True , use_input_mask=True , use_labels=True , vocab_size=99 , hidden_size=32 , projection_dim=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , dropout=0.1 , attention_dropout=0.1 , max_position_embeddings=512 , initializer_range=0.02 , bos_token_id=0 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size , seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
            for batch_idx, start_index in enumerate(rnd_start_indices ):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask )
    def get_config( self ):
        return BlipTextConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
    def create_and_check_model( self , config , input_ids , input_mask ):
        model = TFBlipTextModel(config=config )
        result = model(input_ids , attention_mask=input_mask , training=False )
        result = model(input_ids , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFBlipTextModelTest( TFModelTesterMixin , unittest.TestCase ):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = BlipTextModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BlipTextConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_training( self ):
        pass
    def test_training_gradient_checkpointing( self ):
        pass
    @unittest.skip(reason="Blip does not use inputs_embeds" )
    def test_inputs_embeds( self ):
        pass
    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
    def test_save_load_fast_init_from_base( self ):
        pass
    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
    def test_save_load_fast_init_to_base( self ):
        pass
    @slow
    def test_model_from_pretrained( self ):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_pt_tf_model_equivalence( self , allow_missing_keys=True ):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys ) | 381 | 1 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
    '''Builds small EfficientFormer configs and inputs for the tests below.'''
    def __init__( self , parent , batch_size: int = 13 , image_size: int = 64 , patch_size: int = 2 , embed_dim: int = 3 , num_channels: int = 3 , is_training: bool = True , use_labels: bool = True , hidden_size: int = 128 , hidden_sizes=[16, 32, 64, 128] , num_hidden_layers: int = 7 , num_attention_heads: int = 4 , intermediate_size: int = 37 , hidden_act: str = "gelu" , hidden_dropout_prob: float = 0.1 , attention_probs_dropout_prob: float = 0.1 , type_sequence_label_size: int = 10 , initializer_range: float = 0.02 , encoder_stride: int = 2 , num_attention_outputs: int = 1 , dim: int = 128 , depths: List[int] = [2, 2, 2, 2] , resolution: int = 2 , mlp_expansion_ratio: int = 2 , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = TFEfficientFormerModel(config=config )
        result = model(pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config )
        result = model(pixel_values , labels=labels , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': TFEfficientFormerModel,
            '''image-classification''': (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFEfficientFormerModelTester(self )
        self.config_tester = ConfigTester(
            self , config_class=EfficientFormerConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''EfficientFormer does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
        pass
    @unittest.skip(reason='''EfficientFormer does not support input and output embeddings''' )
    def test_model_common_attributes( self ):
        pass
    def test_forward_signature( self ):
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) , training=False )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            if hasattr(self.model_tester , '''encoder_seq_length''' ):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester , '''chunk_length''' ) and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states
                self.assertIsInstance(hidden_states , (list, tuple) )
                self.assertEqual(len(hidden_states ) , expected_num_layers )
                seq_len = getattr(self.model_tester , '''seq_length''' , None )
                decoder_seq_length = getattr(self.model_tester , '''decoder_seq_length''' , seq_len )
                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    @unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''' )
    def test_for_masked_image_modeling( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_attention_outputs( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester , '''seq_length''' , None )
        encoder_seq_length = getattr(self.model_tester , '''encoder_seq_length''' , seq_len )
        encoder_key_length = getattr(self.model_tester , '''key_length''' , encoder_seq_length )
        chunk_length = getattr(self.model_tester , '''chunk_length''' , None )
        if chunk_length is not None and hasattr(self.model_tester , '''num_hashes''' ):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes
        for model_class in self.all_model_classes:
            inputs_dict['''output_attentions'''] = True
            inputs_dict['''output_hidden_states'''] = False
            config.return_dict = True
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) , training=False )
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_attention_outputs )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) , training=False )
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_attention_outputs )
            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
UpperCAmelCase_ = model_class(lowerCAmelCase__ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
UpperCAmelCase_ = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowerCAmelCase__ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
UpperCAmelCase_ = model(lowerCAmelCase__ )
self.assertTrue(outputs_dict is not None )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class snake_case__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase ( self : Any ) -> List[str]:
return (
EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''' )
if is_vision_available()
else None
)
@slow
def UpperCamelCase ( self : Optional[int] ) -> str:
UpperCAmelCase_ = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''' )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=lowerCAmelCase__ , return_tensors='''tf''' )
# forward pass
UpperCAmelCase_ = model(**lowerCAmelCase__ , training=lowerCAmelCase__ )
# verify the logits
UpperCAmelCase_ = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
UpperCAmelCase_ = tf.constant([-0.0_555, 0.4_825, -0.0_852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
@slow
def UpperCamelCase ( self : List[Any] ) -> int:
UpperCAmelCase_ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'''snap-research/efficientformer-l1-300''' )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=lowerCAmelCase__ , return_tensors='''tf''' )
# forward pass
UpperCAmelCase_ = model(**lowerCAmelCase__ , training=lowerCAmelCase__ )
# verify the logits
UpperCAmelCase_ = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
UpperCAmelCase_ = tf.constant([-0.1_312, 0.4_353, -1.0_499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
| 121 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : str =logging.get_logger(__name__)
__lowerCAmelCase : Any ={
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''megatron-bert'''
    def __init__(self, vocab_size=29_056, hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4_096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 696 | 0 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request("GET" , "https://huggingface.co" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("GET" , "https://huggingface.co" , timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error():
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("GET" , "https://huggingface.co" )
def test_offline_with_datasets_offline_mode_enabled():
"""simple docstring"""
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(ConnectionError ):
http_head("https://huggingface.co" )
| 316 | from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    """A head/tail-indexed FIFO queue used by AVLtree.__str__ for level order."""

    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])
class MyNode:
    """An AVL tree node storing its data, children and subtree height."""

    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height
def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b
def right_rotation(node: MyNode) -> MyNode:
    """Rotate the left-heavy subtree rooted at `node` to the right."""
    print("right rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    """Rotate the right-heavy subtree rooted at `node` to the left."""
    print("left rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
def lr_rotation(node: MyNode) -> MyNode:
    """Left-right double rotation: rotate the left child left, then `node` right."""
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    """Right-left double rotation: rotate the right child right, then `node` left."""
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node
def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root
class AVLtree:
    """A self-balancing binary search tree: insert and delete are O(log n)."""

    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self) -> str:  # a level-order traversal gives a more intuitive look at the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, i) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
    print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
    print(str(t))
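# Added sketch (my own, independent of the classes above): why inserting
# 3, 2, 1 in order triggers the "unbalance detected" branch. With nodes
# encoded as (value, left, right) tuples, the root's balance factor
# height(left) - height(right) hits 2, the AVL rebalancing threshold, and a
# single right rotation restores |balance| <= 1.
def _balance_demo() -> None:
    def height(node):
        return 0 if node is None else 1 + max(height(node[1]), height(node[2]))

    unbalanced = (3, (2, (1, None, None), None), None)
    assert height(unbalanced[1]) - height(unbalanced[2]) == 2
    rebalanced = (2, (1, None, None), (3, None, None))  # after right_rotation
    assert abs(height(rebalanced[1]) - height(rebalanced[2])) <= 1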
| 316 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ : int = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Any = ['OwlViTFeatureExtractor']
UpperCAmelCase_ : Tuple = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
UpperCAmelCase_ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 44 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A : Union[str, Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__A : List[Any] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
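# Added note: the fused in_proj weight stacks the query, key and value
# projections along dim 0. With the 256-dim hidden size used by the DETR
# family (an assumption for other checkpoints), rows [0:256] are q,
# [256:512] are k and [-256:] are v, which is what the slices above copy
# into separate q/k/v projection parameters.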
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def __a ( A__ : List[str] , A__ : Union[str, Any] ):
SCREAMING_SNAKE_CASE = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
SCREAMING_SNAKE_CASE = "resnet101"
if "dc5" in model_name:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = "panoptic" in model_name
if is_panoptic:
SCREAMING_SNAKE_CASE = 250
else:
SCREAMING_SNAKE_CASE = 91
SCREAMING_SNAKE_CASE = "huggingface/label-files"
SCREAMING_SNAKE_CASE = "coco-detection-id2label.json"
SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(A__ , A__ , repo_type="dataset" ) , "r" ) )
idalabel = {int(k): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = idalabel
SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
# load image processor
SCREAMING_SNAKE_CASE = "coco_panoptic" if is_panoptic else "coco_detection"
SCREAMING_SNAKE_CASE = ConditionalDetrImageProcessor(format=A__ )
# prepare image
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=A__ , return_tensors="pt" )
SCREAMING_SNAKE_CASE = encoding["pixel_values"]
logger.info(F"Converting model {model_name}..." )
# load original model from torch hub
SCREAMING_SNAKE_CASE = torch.hub.load("DeppMeng/ConditionalDETR" , A__ , pretrained=A__ ).eval()
SCREAMING_SNAKE_CASE = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
SCREAMING_SNAKE_CASE = "conditional_detr." + src
rename_key(A__ , A__ , A__ )
SCREAMING_SNAKE_CASE = rename_backbone_keys(A__ )
# query, key and value matrices need special treatment
read_in_q_k_v(A__ , is_panoptic=A__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
SCREAMING_SNAKE_CASE = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
SCREAMING_SNAKE_CASE = state_dict.pop(A__ )
SCREAMING_SNAKE_CASE = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
SCREAMING_SNAKE_CASE = state_dict.pop(A__ )
SCREAMING_SNAKE_CASE = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
SCREAMING_SNAKE_CASE = state_dict.pop(A__ )
SCREAMING_SNAKE_CASE = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
SCREAMING_SNAKE_CASE = state_dict.pop(A__ )
SCREAMING_SNAKE_CASE = val
# finally, create HuggingFace model and load state dict
SCREAMING_SNAKE_CASE = ConditionalDetrForSegmentation(A__ ) if is_panoptic else ConditionalDetrForObjectDetection(A__ )
model.load_state_dict(A__ )
model.eval()
model.push_to_hub(repo_id=A__ , organization="DepuMeng" , commit_message="Add model" )
# verify our conversion
SCREAMING_SNAKE_CASE = conditional_detr(A__ )
SCREAMING_SNAKE_CASE = model(A__ )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
# Save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
image_processor.save_pretrained(A__ )
if __name__ == "__main__":
__A : str = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
__A : int = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path) | 16 | 0 |
'''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__snake_case : Optional[int] = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('dataclasses')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('importlib_metadata')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
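# Added usage note: `require_version(requirement, hint)` raises an
# ImportError whose message ends with the hint when the installed version
# violates the pip-style constraint, e.g. (hypothetical call):
# require_version("tokenizers>=0.11.1,!=0.11.3", "pip install -U tokenizers")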
| 713 | '''simple docstring'''
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using rolling hashes."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
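# Added sanity check (my own illustration): dropping text[i] and appending
# text[i + p_len] via the rolling-hash update must reproduce the hash
# computed from scratch on the shifted window.
def _rolling_hash_demo() -> None:
    def full_hash(s: str) -> int:
        h = 0
        for ch in s:
            h = (ord(ch) + h * alphabet_size) % modulus
        return h

    text, p_len = "abcd", 3
    power = pow(alphabet_size, p_len - 1, modulus)
    rolled = (
        (full_hash(text[:p_len]) - ord(text[0]) * power) * alphabet_size
        + ord(text[p_len])
    ) % modulus
    assert rolled == full_hash(text[1 : p_len + 1])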
| 174 | 0 |
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
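# Added check: the loop counts one iteration per bit shifted out, so the
# result matches Python's built-in int.bit_length for non-negative ints,
# e.g. 8 = 0b1000 -> 4.
def _bit_length_demo() -> None:
    for n in (0, 1, 2, 8, 255, 1024):
        assert get_highest_set_bit_position(n) == n.bit_length()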
if __name__ == "__main__":
import doctest
doctest.testmod()
| 54 |
from __future__ import annotations
END = "#"


class Trie:
    """A trie (prefix tree) supporting word insertion and prefix completion."""

    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
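# Added note: with insertion-ordered dicts (CPython 3.7+), the call above
# prints the completions for the prefix "de" with a trailing END-marker
# space: ('depart ', 'detergent ', 'deer ', 'deal ').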
| 61 | 0 |
"""simple docstring"""
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: "TrieNode", word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def test() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
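# Added note: delete() only unsets is_leaf when the word's final node still
# has children, so removing "banana" above keeps "bananas" reachable, while
# a word with a private suffix chain gets its nodes pruned bottom-up.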
| 701 | """simple docstring"""
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Three fixed priority buckets (0 is highest); each bucket is FIFO."""

    def __init__(self) -> None:
        self.queues: list[list[int]] = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """The element's value is its priority: dequeue always returns the minimum."""

    def __init__(self) -> None:
        self.queue: list[int] = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)
def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
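# Added design note: FixedPriorityQueue.dequeue scans a constant number of
# buckets, but list.pop(0) is O(n) per bucket; ElementPriorityQueue pays
# O(n) for min() + remove() on every dequeue. A heapq-based queue would
# bring both down to O(log n) per operation.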
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 663 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class A__ ( _snake_case ):
lowercase = "xlm-roberta"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class A__ ( _snake_case ):
@property
def snake_case_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
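# Added note: the dynamic_axis mapping tells the ONNX exporter which input
# dimensions stay symbolic at runtime (batch and sequence, plus a choice
# axis for multiple-choice heads), so the exported graph is not specialized
# to the shapes used during tracing.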
| 288 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class A__ ( _snake_case , _snake_case ):
lowercase = "resnet"
lowercase = ["basic", "bottleneck"]
    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="bottleneck", hidden_act="relu", downsample_in_first_stage=False, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f'''layer_type={layer_type} is not one of {",".join(self.layer_types)}''')
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f'''stage{idx}''' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
class A__ ( _snake_case ):
lowercase = version.parse("1.11" )
@property
def snake_case_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def snake_case_ ( self ) -> float:
'''simple docstring'''
return 1e-3
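# Added note: in the upstream transformers OnnxConfig this property is
# `atol_for_validation`: the absolute tolerance used when comparing the
# ONNX export's outputs against the PyTorch reference.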
| 288 | 1 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding="max_length" if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw, )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
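# Added worked example: with pad_token_id = 0, columns that are padding in
# every row of the batch are dropped.
def _trim_batch_demo() -> None:
    ids = torch.tensor([[5, 6, 0], [7, 0, 0]])
    mask = ids.ne(0).any(dim=0)  # tensor([True, True, False])
    assert ids[:, mask].tolist() == [[5, 6], [7, 0]]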
class A__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Dict , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict="train" , lowerCAmelCase__ : Dict=None , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : Any="" , ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase : Any = Path(lowerCAmelCase__ ).joinpath(type_path + ".source" )
_UpperCAmelCase : List[Any] = Path(lowerCAmelCase__ ).joinpath(type_path + ".target" )
_UpperCAmelCase : str = self.get_char_lens(self.src_file )
_UpperCAmelCase : Dict = max_source_length
_UpperCAmelCase : List[Any] = max_target_length
assert min(self.src_lens ) > 0, F"""found empty line in {self.src_file}"""
_UpperCAmelCase : Tuple = tokenizer
_UpperCAmelCase : Optional[int] = prefix
if n_obs is not None:
_UpperCAmelCase : Optional[int] = self.src_lens[:n_obs]
_UpperCAmelCase : Any = src_lang
_UpperCAmelCase : List[Any] = tgt_lang
def __len__( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return len(self.src_lens )
def __getitem__( self : str , lowerCAmelCase__ : str ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = index + 1 # linecache starts at 1
_UpperCAmelCase : Union[str, Any] = self.prefix + linecache.getline(str(self.src_file ) , lowerCAmelCase__ ).rstrip("\n" )
_UpperCAmelCase : Optional[Any] = linecache.getline(str(self.tgt_file ) , lowerCAmelCase__ ).rstrip("\n" )
assert source_line, F"""empty source line for index {index}"""
assert tgt_line, F"""empty tgt line for index {index}"""
# Need to add eos token manually for T5
if isinstance(self.tokenizer , lowerCAmelCase__ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_UpperCAmelCase : Tuple = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , lowerCAmelCase__ ) else self.tokenizer
)
_UpperCAmelCase : Optional[int] = self.tokenizer.generator if isinstance(self.tokenizer , lowerCAmelCase__ ) else self.tokenizer
_UpperCAmelCase : Dict = encode_line(lowerCAmelCase__ , lowerCAmelCase__ , self.max_source_length , "right" )
_UpperCAmelCase : Optional[int] = encode_line(lowerCAmelCase__ , lowerCAmelCase__ , self.max_target_length , "right" )
_UpperCAmelCase : List[Any] = source_inputs["input_ids"].squeeze()
_UpperCAmelCase : List[Any] = target_inputs["input_ids"].squeeze()
_UpperCAmelCase : int = source_inputs["attention_mask"].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _lowerCAmelCase ( lowerCAmelCase__ : Any ) -> List[str]:
"""simple docstring"""
return [len(lowerCAmelCase__ ) for x in Path(lowerCAmelCase__ ).open().readlines()]
def _lowerCAmelCase ( self : Tuple , lowerCAmelCase__ : str ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : List[Any] = torch.stack([x["input_ids"] for x in batch] )
_UpperCAmelCase : Optional[Any] = torch.stack([x["attention_mask"] for x in batch] )
_UpperCAmelCase : Tuple = torch.stack([x["decoder_input_ids"] for x in batch] )
_UpperCAmelCase : List[str] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , lowerCAmelCase__ )
else self.tokenizer.pad_token_id
)
_UpperCAmelCase : Optional[Any] = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , lowerCAmelCase__ )
else self.tokenizer.pad_token_id
)
_UpperCAmelCase : List[str] = trim_batch(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase , _UpperCAmelCase : List[str] = trim_batch(lowerCAmelCase__ , lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
_UpperCAmelCase : Any = {
"input_ids": source_ids,
"attention_mask": source_mask,
"decoder_input_ids": y,
}
return batch
__a = getLogger(__name__)
def __UpperCAmelCase ( a_: List[str] ):
return list(itertools.chain.from_iterable(UpperCamelCase__ ) )
def __UpperCAmelCase ( a_: Tuple ):
_UpperCAmelCase : Optional[int] = get_git_info()
save_json(UpperCamelCase__, os.path.join(UpperCamelCase__, "git_log.json" ) )
def __UpperCAmelCase ( a_: str, a_: Union[str, Any], a_: Union[str, Any]=4, **a_: Union[str, Any] ):
with open(UpperCamelCase__, "w" ) as f:
json.dump(UpperCamelCase__, UpperCamelCase__, indent=UpperCamelCase__, **UpperCamelCase__ )
def __UpperCAmelCase ( a_: Optional[int] ):
with open(UpperCamelCase__ ) as f:
return json.load(UpperCamelCase__ )
def __UpperCAmelCase ( ):
_UpperCAmelCase : int = git.Repo(search_parent_directories=UpperCamelCase__ )
_UpperCAmelCase : int = {
"repo_id": str(UpperCamelCase__ ),
"repo_sha": str(repo.head.object.hexsha ),
"repo_branch": str(repo.active_branch ),
"hostname": str(socket.gethostname() ),
}
return repo_infos
def __UpperCAmelCase ( a_: List[Any], a_: Optional[Any] ):
return list(map(UpperCamelCase__, UpperCamelCase__ ) )
def __UpperCAmelCase ( a_: Union[str, Any], a_: Optional[int] ):
with open(UpperCamelCase__, "wb" ) as f:
return pickle.dump(UpperCamelCase__, UpperCamelCase__ )
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
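# Added worked example: "a cat sat" vs "the cat sat on". normalize_answer
# drops the articles, leaving token bags {cat, sat} and {cat, sat, on}:
# overlap = 2, precision = 2/2, recall = 2/3,
# F1 = 2 * (1 * 2/3) / (1 + 2/3) = 0.8.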
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns, reference_lns):
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def __UpperCAmelCase ( a_: Optional[int] ):
return model_prefix.startswith("rag" )
def __UpperCAmelCase ( a_: Tuple, a_: Tuple, a_: List[Any] ):
_UpperCAmelCase : List[Any] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
_UpperCAmelCase : str = "dropout_rate"
for p in extra_params:
if getattr(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
if not hasattr(UpperCamelCase__, UpperCamelCase__ ) and not hasattr(UpperCamelCase__, equivalent_param[p] ):
logger.info("config doesn\'t have a `{}` attribute".format(UpperCamelCase__ ) )
delattr(UpperCamelCase__, UpperCamelCase__ )
continue
_UpperCAmelCase : List[str] = p if hasattr(UpperCamelCase__, UpperCamelCase__ ) else equivalent_param[p]
setattr(UpperCamelCase__, UpperCamelCase__, getattr(UpperCamelCase__, UpperCamelCase__ ) )
delattr(UpperCamelCase__, UpperCamelCase__ )
return hparams, config | 720 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__a = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 257 | 0 |
from collections import namedtuple
a : List[Any] = namedtuple("from_to", "from_ to")
a : Tuple = {
"cubicmeter": from_to(1, 1),
"litre": from_to(0.0_01, 1_000),
"kilolitre": from_to(1, 1),
"gallon": from_to(0.0_04_54, 2_64.1_72),
"cubicyard": from_to(0.7_64_55, 1.3_07_95),
"cubicfoot": from_to(0.0_28, 35.31_47),
"cup": from_to(0.0_00_23_65_88, 42_26.75),
}
def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"""Invalid 'from_type' value: {from_type!r} Supported values are:\n"""
            + ", ".join(METRIC_CONVERSION))
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
            + ", ".join(METRIC_CONVERSION))
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
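# Added worked example: 2 litres in cups is
# 2 * 0.001 (litre -> cubic metre) * 4226.75 (cubic metre -> cup) ≈ 8.4535.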
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase = ViTImageProcessor if is_vision_available() else None
@property
def __snake_case ( self : List[str]) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')

        image_processor_map = {
            'do_normalize': False,
            'do_resize': True,
            'image_processor_type': 'ViTImageProcessor',
            'resample': 3,
            'size': {'height': 32, 'width': 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """This function prepares a single PIL image."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
def __snake_case ( self : Optional[Any]) -> List[Any]:
A_ = self.get_tokenizer()
A_ = self.get_image_processor()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
processor.save_pretrained(self.tmpdirname)
A_ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_lowercase)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _lowercase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , _lowercase)
def __snake_case ( self : Union[str, Any]) -> Optional[Any]:
A_ = self.get_tokenizer()
A_ = self.get_image_processor()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
processor.save_pretrained(self.tmpdirname)
A_ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
A_ = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0)
A_ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_lowercase , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _lowercase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _lowercase)
def __snake_case ( self : List[Any]) -> str:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
A_ = self.prepare_image_inputs()
A_ = image_processor(_lowercase , return_tensors='np')
A_ = processor(images=_lowercase , return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def __snake_case ( self : Any) -> str:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
A_ = 'test'
A_ = processor(text=_lowercase)
A_ = tokenizer(_lowercase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __snake_case ( self : str) -> Dict:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
A_ = 'test'
A_ = self.prepare_image_inputs()
A_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'labels'])
# test if it raises when no input is passed
with pytest.raises(_lowercase):
processor()
def __snake_case ( self : Union[str, Any]) -> Optional[int]:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
A_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
A_ = processor.char_decode(_lowercase)
A_ = tokenizer.batch_decode(_lowercase)
A_ = [seq.replace(' ' , '') for seq in decoded_tok]
self.assertListEqual(_lowercase , _lowercase)
def __snake_case ( self : List[str]) -> str:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
A_ = None
A_ = self.prepare_image_inputs()
A_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def __snake_case ( self : List[str]) -> Any:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
A_ = torch.randn(1 , 27 , 38)
A_ = torch.randn(1 , 27 , 50_257)
A_ = torch.randn(1 , 27 , 30_522)
A_ = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'])
| 366 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = 'canine'

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16_384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16_384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
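
# Illustrative note (not from the original file): instantiating CanineConfig()
# with no arguments reproduces the google/canine-s defaults above; any field
# can be overridden, e.g. CanineConfig(num_hash_buckets=8_192).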
| 721 |
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)

        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val

        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is the min-heapify method
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[smallest], array[idx] = array[idx], array[smallest]
                (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                ) = (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
r = Node('R', -1)
b = Node('B', 6)
a = Node('A', 3)
x = Node('X', 1)
e = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
print(i)
print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
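    # Illustrative usage (not part of the original demo): peek() reads the
    # minimum without removing it; remove() pops it and re-heapifies.
    print(my_min_heap.peek())    # Node(B, -17) after the decrease_key above
    print(my_min_heap.remove())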
| 72 | 0 |
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    # Exactly one of the three quantities must be passed as 0; it is solved for.
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
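    # Illustrative usage (not part of the original module): pass 0 for the
    # unknown quantity and read it back from the returned dict.
    print(ohms_law(voltage=10, current=5, resistance=0))  # {'resistance': 2.0}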
| 297 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 297 | 1 |
def _a ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ):
def get_matched_characters(_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ) -> str:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
_SCREAMING_SNAKE_CASE = int(max(0 , i - limit ) )
_SCREAMING_SNAKE_CASE = int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = F'{_stra[0:_stra.index(_SCREAMING_SNAKE_CASE )]} {_stra[_stra.index(_SCREAMING_SNAKE_CASE ) + 1:]}'
return "".join(_SCREAMING_SNAKE_CASE )
# matching characters
_SCREAMING_SNAKE_CASE = get_matched_characters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = get_matched_characters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = len(_SCREAMING_SNAKE_CASE )
# transposition
_SCREAMING_SNAKE_CASE = (
len([(ca, ca) for ca, ca in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if ca != ca] ) // 2
)
if not match_count:
_SCREAMING_SNAKE_CASE = 0.0
else:
_SCREAMING_SNAKE_CASE = (
1
/ 3
* (
match_count / len(_SCREAMING_SNAKE_CASE )
+ match_count / len(_SCREAMING_SNAKE_CASE )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
_SCREAMING_SNAKE_CASE = 0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world""")) | 711 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention_forwardGenerator_pass = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1_005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1_000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image) | 493 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
    'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}


class ErnieMConfig(PretrainedConfig):
    """Configuration class for the ERNIE-M model."""

    model_type = 'ernie_m'
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250_002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3_072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
| 222 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    dataset_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(dataset_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 222 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_timm_backbone': ['TimmBackboneConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_timm_backbone'] = ['TimmBackbone']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 711 |
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
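    # Illustrative round-trip check (not part of the original module): a keyword
    # with distinct letters yields a permutation, so decipher undoes encipher.
    demo_map = create_cipher_map("KEYWORD")
    assert decipher(encipher("HELLO", demo_map), demo_map) == "HELLO"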
| 185 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
        "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoCBertForCausalLM",
        "RoCBertForMaskedLM",
        "RoCBertForMultipleChoice",
        "RoCBertForPreTraining",
        "RoCBertForQuestionAnswering",
        "RoCBertForSequenceClassification",
        "RoCBertForTokenClassification",
        "RoCBertLayer",
        "RoCBertModel",
        "RoCBertPreTrainedModel",
        "load_tf_weights_in_roc_bert",
    ]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
    else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 92 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json',
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = 'focalnet'

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 357 | 0 |
'''simple docstring'''
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = 'T5Config'


def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
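
# Illustrative example (not part of the original module): with pad_token_id=0
# and decoder_start_token_id=0, [[5, 6, 7]] shifts to [[0, 5, 6]]; any -100
# placeholders carried over from the labels are refilled with the pad token.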
class FlaxMT5Model(FlaxT5Model):
    model_type = 'mt5'
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = 'mt5'
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = 'mt5'
    config_class = MT5Config | 709 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbotSmall90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"
    @cached_property
    def tokenizer(self):
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        ) | 596 | 0 |
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"
class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('mode is not a valid split name')
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = 'v2' if args.version_2_with_negative else 'v1'
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '.lock'
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features['features']
                self.dataset = self.old_features.get('dataset', None)
                self.examples = self.old_features.get('examples', None)
                logger.info(
                    f'Loading features from cached file {cached_features_file} [took %.3f s]', time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
                        ' future run'
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {'features': self.features, 'dataset': self.dataset, 'examples': self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]'
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            'input_ids': input_ids,
            'attention_mask': attention_mask,
            'token_type_ids': token_type_ids,
        }

        if self.args.model_type in ['xlm', 'roberta', 'distilbert', 'camembert']:
            del inputs['token_type_ids']

        if self.args.model_type in ['xlnet', 'xlm']:
            inputs.update({'cls_index': cls_index, 'p_mask': p_mask})
            if self.args.version_2_with_negative:
                inputs.update({'is_impossible': is_impossible})
            if self.is_language_sensitive:
                inputs.update({'langs': (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({'start_positions': start_positions, 'end_positions': end_positions})

        return inputs
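
# Illustrative usage (not part of the original module; the tokenizer and data
# directory below are placeholders):
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="path/to/squad")
#   dataset = SquadDataset(args, tokenizer, mode="dev")
#   inputs = dataset[0]  # dict of input_ids / attention_mask / token_type_ids tensors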
| 650 |
"""simple docstring"""
from math import ceil, sqrt
def SCREAMING_SNAKE_CASE_ ( snake_case : int = 1_000_000 )-> int:
_lowerCamelCase = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
_lowerCamelCase = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
_lowerCamelCase = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f'{solution() = }')
| 650 | 1 |
def solution() -> str:
    """Returns the last ten digits of the series 1^1 + 2^2 + ... + 1000^1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(solution()) | 711 |
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data) | 224 | 0 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1_000 * (box[0] / width)),
        int(1_000 * (box[1] / height)),
        int(1_000 * (box[2] / width)),
        int(1_000 * (box[3] / height)),
    ]
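
# Illustrative example (not part of the original module): the pixel box
# [100, 200, 300, 400] on a 1000x2000 page maps onto the fixed 0-1000 grid
# as [100, 100, 300, 200].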
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type='dict', config=tesseract_config)
    words, left, top, width, height = data['text'], data['left'], data['top'], data['width'], data['height']

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 224, 'width': 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}')
        output_size = (size['height'], size['width'])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase=None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = ChannelDimension.FIRST , **lowerCAmelCase , ) -> PIL.Image.Image:
SCREAMING_SNAKE_CASE__: Tuple= do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__: List[Any]= size if size is not None else self.size
SCREAMING_SNAKE_CASE__: int= get_size_dict(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__: List[Any]= do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__: List[Any]= rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__: Optional[Any]= do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__: List[str]= image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__: Tuple= image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__: Optional[Any]= apply_ocr if apply_ocr is not None else self.apply_ocr
SCREAMING_SNAKE_CASE__: Union[str, Any]= ocr_lang if ocr_lang is not None else self.ocr_lang
SCREAMING_SNAKE_CASE__: List[str]= tesseract_config if tesseract_config is not None else self.tesseract_config
SCREAMING_SNAKE_CASE__: int= make_list_of_images(lowerCAmelCase )
if not valid_images(lowerCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__: str= [to_numpy_array(lowerCAmelCase ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
SCREAMING_SNAKE_CASE__: Dict= []
SCREAMING_SNAKE_CASE__: Optional[Any]= []
for image in images:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: List[str]= apply_tesseract(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
words_batch.append(lowerCAmelCase )
boxes_batch.append(lowerCAmelCase )
if do_resize:
SCREAMING_SNAKE_CASE__: str= [self.resize(image=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__: Tuple= [self.rescale(image=lowerCAmelCase , scale=lowerCAmelCase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__: List[Any]= [self.normalize(image=lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE__: Optional[int]= [to_channel_dimension_format(lowerCAmelCase , lowerCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE__: Dict= BatchFeature(data={'''pixel_values''': images} , tensor_type=lowerCAmelCase )
if apply_ocr:
SCREAMING_SNAKE_CASE__: List[Any]= words_batch
SCREAMING_SNAKE_CASE__: List[str]= boxes_batch
return data
| 64 |
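# A hedged, standalone sketch (hypothetical helper name, not the library implementation) of the
# resize -> rescale -> normalize -> channel-first pipeline the image processor above applies:
import numpy as np
from PIL import Image


def preprocess_sketch(image: Image.Image, size=(224, 224), scale=1 / 255, mean=0.5, std=0.5) -> np.ndarray:
    arr = np.asarray(image.resize(size), dtype=np.float32)  # PIL's resize takes (width, height)
    arr = arr * scale  # rescale pixel values to [0, 1]
    arr = (arr - mean) / std  # normalize with the given mean/std
    return arr.transpose(2, 0, 1)  # HWC -> CHW, i.e. ChannelDimension.FIRST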
"""simple docstring"""
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of `number`, rounded to `digit_amount` places when positive."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
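# Worked example of the rounding path above: decimal_isolate(35.345, 1) computes
# round(35.345 - int(35.345), 1) = round(0.345, 1) ≈ 0.3, the decimal part kept to one digit.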
| 453 | 0 |
'''simple docstring'''
import re
def dna_complement(dna: str) -> str:
    """Return the complementary strand, mapping A<->T and C<->G."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
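# Usage sketch of the complement above: dna_complement("ATCG") == "TAGC" and
# dna_complement("GATC") == "CTAG"; any character outside A/T/C/G raises ValueError.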
| 716 |
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
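# A standalone sketch of the cartesian-product expansion described above (it mirrors the
# itertools.product call in main() below):
#
#   import itertools
#   dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
#   variations = [" ".join(filter(None, combo)) for combo in itertools.product(*dims)]
#   # -> ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#   #     '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']
#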
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")


class Tee:
    """
    A helper class to tee print's output into a file.
    Usage: sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """
    Return the original command line string, wrapped to `max_width` characters per line.
    """
    cmd = []
    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)
    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd)
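# Quick check of the --output_dir substitution above on an assumed sample command:
#   re.sub(r"--output_dir\s+[^\s]+", "", "run.py --output_dir old --fp16")
#   # -> 'run.py  --fp16'  (the old setting is stripped before ours is appended)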
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(id, cmd, variation_key, variation, longest_variation_len,
                target_metric_key, report_metric_keys, repeat_times, output_dir, verbose):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose)
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
return F'''
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument("--variations", default=None, type=str, nargs="+", required=True,
                        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'")
    parser.add_argument("--base-variation", default=None, type=str,
                        help="Baseline variation to compare to. if None the minimal target value will be used to compare against")
    parser.add_argument("--target-metric-key", default=None, type=str, required=True,
                        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second")
    parser.add_argument("--report-metric-keys", default="", type=str,
                        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'")
    parser.add_argument("--repeat-times", default=1, type=int,
                        help="How many times to re-run each variation - an average will be reported")
    parser.add_argument("--output_dir", default="output_benchmark", type=str,
                        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked")
    parser.add_argument("--verbose", default=False, action="store_true",
                        help="Whether to show the outputs of each run or just the benchmark progress")
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(process_run(id + 1, cmd, variation_key, variation, longest_variation_len,
                                   args.target_metric_key, report_metric_keys, args.repeat_times,
                                   output_dir, args.verbose))

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
| 680 | 0 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class lowercase__ (unittest.TestCase ):
"""simple docstring"""
def test_text_streamer(self):
snake_case__ : int = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
snake_case__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__UpperCamelCase )
snake_case__ : Tuple = -1
snake_case__ : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__UpperCamelCase )
snake_case__ : Dict = model.generate(__UpperCamelCase , max_new_tokens=1_0 , do_sample=__UpperCamelCase )
snake_case__ : Optional[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
snake_case__ : int = TextStreamer(__UpperCamelCase )
model.generate(__UpperCamelCase , max_new_tokens=1_0 , do_sample=__UpperCamelCase , streamer=__UpperCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
snake_case__ : Union[str, Any] = cs.out[:-1]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def test_iterator_streamer(self):
snake_case__ : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
snake_case__ : Dict = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__UpperCamelCase )
snake_case__ : int = -1
snake_case__ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__UpperCamelCase )
snake_case__ : List[Any] = model.generate(__UpperCamelCase , max_new_tokens=1_0 , do_sample=__UpperCamelCase )
snake_case__ : Any = tokenizer.decode(greedy_ids[0] )
snake_case__ : Any = TextIteratorStreamer(__UpperCamelCase )
snake_case__ : List[str] = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer}
snake_case__ : List[str] = Thread(target=model.generate , kwargs=__UpperCamelCase )
thread.start()
snake_case__ : int = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def test_text_streamer_skip_prompt(self):
snake_case__ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
snake_case__ : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__UpperCamelCase )
snake_case__ : Tuple = -1
snake_case__ : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__UpperCamelCase )
snake_case__ : str = model.generate(__UpperCamelCase , max_new_tokens=1_0 , do_sample=__UpperCamelCase )
snake_case__ : Optional[Any] = greedy_ids[:, input_ids.shape[1] :]
snake_case__ : Tuple = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
snake_case__ : str = TextStreamer(__UpperCamelCase , skip_prompt=__UpperCamelCase )
model.generate(__UpperCamelCase , max_new_tokens=1_0 , do_sample=__UpperCamelCase , streamer=__UpperCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
snake_case__ : List[Any] = cs.out[:-1]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def test_text_streamer_decode_kwargs(self):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
snake_case__ : List[str] = AutoTokenizer.from_pretrained("""distilgpt2""" )
snake_case__ : Tuple = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(__UpperCamelCase )
snake_case__ : Any = -1
snake_case__ : Dict = torch.ones((1, 5) , device=__UpperCamelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
snake_case__ : List[str] = TextStreamer(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
model.generate(__UpperCamelCase , max_new_tokens=1 , do_sample=__UpperCamelCase , streamer=__UpperCamelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
snake_case__ : List[Any] = cs.out[:-1] # Remove the final "\n"
snake_case__ : List[str] = tokenizer(__UpperCamelCase , return_tensors="""pt""" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def test_iterator_streamer_timeout(self):
snake_case__ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
snake_case__ : str = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__UpperCamelCase )
snake_case__ : Any = -1
snake_case__ : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__UpperCamelCase )
snake_case__ : str = TextIteratorStreamer(__UpperCamelCase , timeout=0.001 )
snake_case__ : Any = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer}
snake_case__ : int = Thread(target=model.generate , kwargs=__UpperCamelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(Empty):
snake_case__ : str = """"""
for new_text in streamer:
streamer_text += new_text
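# A hedged end-to-end sketch of the TextIteratorStreamer pattern the tests above exercise:
# generation runs on a background thread while the main thread consumes decoded text.
def _streamer_usage_sketch():
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    streamer = TextIteratorStreamer(tokenizer)
    inputs = tokenizer("Hello", return_tensors="pt")
    thread = Thread(target=model.generate, kwargs=dict(**inputs, max_new_tokens=10, streamer=streamer))
    thread.start()
    return "".join(streamer)  # iterates until generation finishes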
| 648 |
import os
import time
import numpy as np
import onnxruntime as ort
__snake_case :Any ='1'
__snake_case :List[str] ='0'
__snake_case :Union[str, Any] ='1'
sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('Create inference session...')
execution_provider = ['TensorrtExecutionProvider', 'CUDAExecutionProvider']
sess = ort.InferenceSession('model.onnx', sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)
print('Warm up phase...')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('Start inference...')
start_time = time.time()
max_iters = 2000
for _ in range(max_iters):
    outputs = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('Average Inference Time = {:.3f} ms'.format((time.time() - start_time) * 1000 / max_iters))
| 106 | 0 |
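# Worked check of the average-latency formula printed by the ONNX benchmark above (assumed
# elapsed time): if the 2000 timed iterations take 10.0 s in total, the script reports
# 10.0 * 1000 / 2000 = 5.000 ms per inference.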
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class CTRLConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
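# Hedged usage sketch: via `attribute_map`, the canonical config names resolve to the
# CTRL-specific ones defined above.
def _config_usage_sketch():
    cfg = CTRLConfig()  # all defaults: 246534 vocab, 1280-dim embeddings, 48 layers, 16 heads
    assert cfg.hidden_size == cfg.n_embd == 1280
    assert cfg.num_hidden_layers == cfg.n_layer == 48
    return cfg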
| 701 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class __A ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ = BertJapaneseTokenizer
UpperCAmelCase__ = False
UpperCAmelCase__ = True
def __snake_case ( self):
"""simple docstring"""
super().setUp()
_lowerCamelCase : Optional[int] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
_lowerCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
def __snake_case ( self , a__):
"""simple docstring"""
_lowerCamelCase : Dict = '''こんにちは、世界。 \nこんばんは、世界。'''
_lowerCamelCase : Union[str, Any] = '''こんにちは 、 世界 。 こんばんは 、 世界 。'''
return input_text, output_text
def __snake_case ( self , a__):
"""simple docstring"""
_lowerCamelCase, _lowerCamelCase : int = self.get_input_output_texts(a__)
_lowerCamelCase : Optional[Any] = tokenizer.encode(a__ , add_special_tokens=a__)
_lowerCamelCase : str = tokenizer.decode(a__ , clean_up_tokenization_spaces=a__)
return text, ids
def __snake_case ( self):
"""simple docstring"""
pass # TODO add if relevant
def __snake_case ( self):
"""simple docstring"""
pass # TODO add if relevant
def __snake_case ( self):
"""simple docstring"""
pass # TODO add if relevant
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : int = self.tokenizer_class(self.vocab_file)
_lowerCamelCase : Tuple = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''')
self.assertListEqual(a__ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : str = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''')
self.assertIsNotNone(a__)
_lowerCamelCase : List[str] = '''こんにちは、世界。\nこんばんは、世界。'''
_lowerCamelCase : int = tokenizer.tokenize(a__)
self.assertListEqual(a__ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
_lowerCamelCase : Optional[int] = os.path.join(self.tmpdirname , '''tokenizer.bin''')
with open(a__ , '''wb''') as handle:
pickle.dump(a__ , a__)
with open(a__ , '''rb''') as handle:
_lowerCamelCase : List[str] = pickle.load(a__)
_lowerCamelCase : Dict = tokenizer_new.tokenize(a__)
self.assertListEqual(a__ , a__)
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = MecabTokenizer(mecab_dic='''ipadic''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __snake_case ( self):
"""simple docstring"""
try:
_lowerCamelCase : str = MecabTokenizer(mecab_dic='''unidic_lite''')
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __snake_case ( self):
"""simple docstring"""
try:
_lowerCamelCase : int = MecabTokenizer(mecab_dic='''unidic''')
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : List[Any] = MecabTokenizer(do_lower_case=a__ , mecab_dic='''ipadic''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __snake_case ( self):
"""simple docstring"""
try:
_lowerCamelCase : Any = MecabTokenizer(
do_lower_case=a__ , normalize_text=a__ , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''')
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Optional[int] = MecabTokenizer(normalize_text=a__ , mecab_dic='''ipadic''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , )
@require_sudachi
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''')
self.assertIsNotNone(a__)
_lowerCamelCase : Tuple = '''こんにちは、世界。\nこんばんは、世界。'''
_lowerCamelCase : Tuple = tokenizer.tokenize(a__)
self.assertListEqual(a__ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
_lowerCamelCase : Tuple = os.path.join(self.tmpdirname , '''tokenizer.bin''')
with open(a__ , '''wb''') as handle:
pickle.dump(a__ , a__)
with open(a__ , '''rb''') as handle:
_lowerCamelCase : str = pickle.load(a__)
_lowerCamelCase : Any = tokenizer_new.tokenize(a__)
self.assertListEqual(a__ , a__)
@require_sudachi
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Optional[int] = SudachiTokenizer(sudachi_dict_type='''core''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Dict = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''')
self.assertListEqual(tokenizer.tokenize('''外国人参政権''') , ['''外国''', '''人''', '''参政''', '''権'''])
@require_sudachi
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''')
self.assertListEqual(tokenizer.tokenize('''外国人参政権''') , ['''外国人''', '''参政権'''])
@require_sudachi
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Dict = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''')
self.assertListEqual(tokenizer.tokenize('''外国人参政権''') , ['''外国人参政権'''])
@require_sudachi
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Tuple = SudachiTokenizer(do_lower_case=a__ , sudachi_dict_type='''core''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = SudachiTokenizer(normalize_text=a__ , sudachi_dict_type='''core''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : List[Any] = SudachiTokenizer(trim_whitespace=a__ , sudachi_dict_type='''core''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
@require_jumanpp
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Any = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''')
self.assertIsNotNone(a__)
_lowerCamelCase : List[str] = '''こんにちは、世界。\nこんばんは、世界。'''
_lowerCamelCase : List[str] = tokenizer.tokenize(a__)
self.assertListEqual(a__ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
_lowerCamelCase : List[str] = os.path.join(self.tmpdirname , '''tokenizer.bin''')
with open(a__ , '''wb''') as handle:
pickle.dump(a__ , a__)
with open(a__ , '''rb''') as handle:
_lowerCamelCase : Optional[Any] = pickle.load(a__)
_lowerCamelCase : Dict = tokenizer_new.tokenize(a__)
self.assertListEqual(a__ , a__)
@require_jumanpp
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = JumanppTokenizer(do_lower_case=a__)
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Dict = JumanppTokenizer(normalize_text=a__)
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : int = JumanppTokenizer(trim_whitespace=a__)
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , )
@require_jumanpp
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Any = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''') , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , )
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : str = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
_lowerCamelCase : List[str] = {}
for i, token in enumerate(a__):
_lowerCamelCase : Optional[Any] = i
_lowerCamelCase : Union[str, Any] = WordpieceTokenizer(vocab=a__ , unk_token='''[UNK]''')
self.assertListEqual(tokenizer.tokenize('''''') , [])
self.assertListEqual(tokenizer.tokenize('''こんにちは''') , ['''こんにちは'''])
self.assertListEqual(tokenizer.tokenize('''こんばんは''') , ['''こん''', '''##ばんは'''])
self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''') , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''])
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Tuple = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''')
_lowerCamelCase : str = tokenizer.subword_tokenizer
_lowerCamelCase : Any = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''')
self.assertListEqual(a__ , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''])
_lowerCamelCase : str = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''')
self.assertListEqual(a__ , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''])
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''')
_lowerCamelCase : int = tokenizer.encode('''ありがとう。''' , add_special_tokens=a__)
_lowerCamelCase : Union[str, Any] = tokenizer.encode('''どういたしまして。''' , add_special_tokens=a__)
_lowerCamelCase : int = tokenizer.build_inputs_with_special_tokens(a__)
_lowerCamelCase : List[str] = tokenizer.build_inputs_with_special_tokens(a__ , a__)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __A ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ = BertJapaneseTokenizer
UpperCAmelCase__ = False
def __snake_case ( self):
"""simple docstring"""
super().setUp()
_lowerCamelCase : Dict = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
_lowerCamelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
def __snake_case ( self , **a__):
"""simple docstring"""
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **a__)
def __snake_case ( self , a__):
"""simple docstring"""
_lowerCamelCase : Any = '''こんにちは、世界。 \nこんばんは、世界。'''
_lowerCamelCase : int = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
return input_text, output_text
def __snake_case ( self):
"""simple docstring"""
pass # TODO add if relevant
def __snake_case ( self):
"""simple docstring"""
pass # TODO add if relevant
def __snake_case ( self):
"""simple docstring"""
pass # TODO add if relevant
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : int = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''')
_lowerCamelCase : Optional[int] = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''')
self.assertListEqual(
a__ , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a__) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12])
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : List[Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
_lowerCamelCase : List[str] = {}
for i, token in enumerate(a__):
_lowerCamelCase : List[Any] = i
_lowerCamelCase : Union[str, Any] = CharacterTokenizer(vocab=a__ , unk_token='''[UNK]''')
self.assertListEqual(tokenizer.tokenize('''''') , [])
self.assertListEqual(tokenizer.tokenize('''こんにちは''') , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''])
self.assertListEqual(tokenizer.tokenize('''こんにちほ''') , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''])
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Dict = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''')
_lowerCamelCase : List[str] = tokenizer.encode('''ありがとう。''' , add_special_tokens=a__)
_lowerCamelCase : Optional[int] = tokenizer.encode('''どういたしまして。''' , add_special_tokens=a__)
_lowerCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(a__)
_lowerCamelCase : int = tokenizer.build_inputs_with_special_tokens(a__ , a__)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __A ( unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : List[Any] = '''cl-tohoku/bert-base-japanese'''
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained(a__)
self.assertIsInstance(a__ , a__)
class __A ( unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = '''cl-tohoku/bert-base-japanese'''
with self.assertLogs('''transformers''' , level='''WARNING''') as cm:
BertTokenizer.from_pretrained(a__)
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.'''))
_lowerCamelCase : List[Any] = '''bert-base-cased'''
with self.assertLogs('''transformers''' , level='''WARNING''') as cm:
BertJapaneseTokenizer.from_pretrained(a__)
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.'''))
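# A hedged usage sketch of the tokenizer the tests above cover: BertJapaneseTokenizer pairs a
# word tokenizer (mecab / sudachi / jumanpp) with a subword tokenizer (wordpiece / character).
def _tokenizer_usage_sketch():
    tokenizer = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
    return tokenizer.tokenize("こんにちは、世界。")  # word-level split, then wordpiece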
| 613 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Tuple ):
__UpperCAmelCase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
__UpperCAmelCase = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
__UpperCAmelCase = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
__UpperCAmelCase = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 1_60_00,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__UpperCAmelCase = os.path.join(self.tmpdirname , _lowercase )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowercase ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowercase ) + '''\n''' )
# load decoder from hub
__UpperCAmelCase = '''hf-internal-testing/ngram-beam-search-decoder'''
def a ( self : Any , **_lowercase : Optional[int] ):
__UpperCAmelCase = self.add_kwargs_tokens_map.copy()
kwargs.update(_lowercase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def a ( self : Optional[Any] , **_lowercase : Any ):
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_lowercase )
def a ( self : Optional[Any] , **_lowercase : Optional[Any] ):
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_lowercase )
def a ( self : Optional[Any] ):
shutil.rmtree(self.tmpdirname )
def a ( self : Tuple ):
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = self.get_feature_extractor()
__UpperCAmelCase = self.get_decoder()
__UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase )
processor.save_pretrained(self.tmpdirname )
__UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowercase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _lowercase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _lowercase )
def a ( self : Dict ):
__UpperCAmelCase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def a ( self : str ):
__UpperCAmelCase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(_lowercase , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=_lowercase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def a ( self : Dict ):
__UpperCAmelCase = self.get_feature_extractor()
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = self.get_decoder()
__UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase )
__UpperCAmelCase = floats_list((3, 10_00) )
__UpperCAmelCase = feature_extractor(_lowercase , return_tensors='''np''' )
__UpperCAmelCase = processor(_lowercase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def a ( self : str ):
__UpperCAmelCase = self.get_feature_extractor()
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = self.get_decoder()
__UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase )
__UpperCAmelCase = '''This is a test string'''
__UpperCAmelCase = processor(text=_lowercase )
__UpperCAmelCase = tokenizer(_lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a ( self : Optional[int] , _lowercase : Any=(2, 10, 16) , _lowercase : str=77 ):
np.random.seed(_lowercase )
return np.random.rand(*_lowercase )
def a ( self : Union[str, Any] ):
__UpperCAmelCase = self.get_feature_extractor()
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = self.get_decoder()
__UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase )
__UpperCAmelCase = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__UpperCAmelCase = processor.decode(_lowercase )
__UpperCAmelCase = decoder.decode_beams(_lowercase )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def a ( self : int , _lowercase : Dict ):
__UpperCAmelCase = self.get_feature_extractor()
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = self.get_decoder()
__UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase )
__UpperCAmelCase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__UpperCAmelCase = processor.batch_decode(_lowercase )
else:
with get_context(_lowercase ).Pool() as pool:
__UpperCAmelCase = processor.batch_decode(_lowercase , _lowercase )
__UpperCAmelCase = list(_lowercase )
with get_context('''fork''' ).Pool() as p:
__UpperCAmelCase = decoder.decode_beams_batch(_lowercase , _lowercase )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_lowercase , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(_lowercase , decoded_processor.logit_score )
self.assertListEqual(_lowercase , decoded_processor.lm_score )
def a ( self : Dict ):
__UpperCAmelCase = self.get_feature_extractor()
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = self.get_decoder()
__UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase )
__UpperCAmelCase = self._get_dummy_logits()
__UpperCAmelCase = 15
__UpperCAmelCase = -20.0
__UpperCAmelCase = -4.0
__UpperCAmelCase = processor.batch_decode(
_lowercase , beam_width=_lowercase , beam_prune_logp=_lowercase , token_min_logp=_lowercase , )
__UpperCAmelCase = decoded_processor_out.text
__UpperCAmelCase = list(_lowercase )
with get_context('''fork''' ).Pool() as pool:
__UpperCAmelCase = decoder.decode_beams_batch(
_lowercase , _lowercase , beam_width=_lowercase , beam_prune_logp=_lowercase , token_min_logp=_lowercase , )
__UpperCAmelCase = [d[0][0] for d in decoded_decoder_out]
__UpperCAmelCase = [d[0][2] for d in decoded_decoder_out]
__UpperCAmelCase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_lowercase , _lowercase )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , _lowercase )
self.assertTrue(np.array_equal(_lowercase , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , _lowercase , atol=1E-3 ) )
self.assertTrue(np.array_equal(_lowercase , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474] , _lowercase , atol=1E-3 ) )
def a ( self : int ):
__UpperCAmelCase = self.get_feature_extractor()
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = self.get_decoder()
__UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase )
__UpperCAmelCase = self._get_dummy_logits()
__UpperCAmelCase = 2.0
__UpperCAmelCase = 5.0
__UpperCAmelCase = -20.0
__UpperCAmelCase = True
__UpperCAmelCase = processor.batch_decode(
_lowercase , alpha=_lowercase , beta=_lowercase , unk_score_offset=_lowercase , lm_score_boundary=_lowercase , )
__UpperCAmelCase = decoded_processor_out.text
__UpperCAmelCase = list(_lowercase )
decoder.reset_params(
alpha=_lowercase , beta=_lowercase , unk_score_offset=_lowercase , lm_score_boundary=_lowercase , )
with get_context('''fork''' ).Pool() as pool:
__UpperCAmelCase = decoder.decode_beams_batch(
_lowercase , _lowercase , )
__UpperCAmelCase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_lowercase , _lowercase )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , _lowercase )
__UpperCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _lowercase )
def a ( self : Dict ):
__UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__UpperCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
__UpperCAmelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__UpperCAmelCase = os.listdir(_lowercase )
__UpperCAmelCase = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_lowercase , _lowercase )
def a ( self : List[Any] ):
__UpperCAmelCase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
__UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained(_lowercase )
__UpperCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
__UpperCAmelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__UpperCAmelCase = os.listdir(_lowercase )
__UpperCAmelCase = os.listdir(_lowercase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_lowercase , _lowercase )
def a ( self : int ):
__UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__UpperCAmelCase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__UpperCAmelCase = floats_list((3, 10_00) )
__UpperCAmelCase = processor_wavaveca(_lowercase , return_tensors='''np''' )
__UpperCAmelCase = processor_auto(_lowercase , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__UpperCAmelCase = self._get_dummy_logits()
__UpperCAmelCase = processor_wavaveca.batch_decode(_lowercase )
__UpperCAmelCase = processor_auto.batch_decode(_lowercase )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def a ( self : Optional[int] ):
__UpperCAmelCase = self.get_feature_extractor()
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = self.get_decoder()
__UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def a ( _lowercase : List[Any] , _lowercase : Optional[Any] ):
__UpperCAmelCase = [d[key] for d in offsets]
return retrieved_list
def a ( self : Tuple ):
__UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__UpperCAmelCase = self._get_dummy_logits()[0]
__UpperCAmelCase = processor.decode(_lowercase , output_word_offsets=_lowercase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_lowercase , _lowercase ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def a ( self : str ):
__UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__UpperCAmelCase = self._get_dummy_logits()
__UpperCAmelCase = processor.batch_decode(_lowercase , output_word_offsets=_lowercase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_lowercase , _lowercase ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(_lowercase , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def a ( self : Union[str, Any] ):
import torch
__UpperCAmelCase = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=_lowercase )
__UpperCAmelCase = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_60_00 ) )
__UpperCAmelCase = iter(_lowercase )
__UpperCAmelCase = next(_lowercase )
__UpperCAmelCase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
__UpperCAmelCase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__UpperCAmelCase = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
__UpperCAmelCase = model(_lowercase ).logits.cpu().numpy()
__UpperCAmelCase = processor.decode(logits[0] , output_word_offsets=_lowercase )
__UpperCAmelCase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__UpperCAmelCase = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
__UpperCAmelCase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(_lowercase , '''word''' ) ) , _lowercase )
self.assertEqual(''' '''.join(self.get_from_offsets(_lowercase , '''word''' ) ) , output.text )
# output times
__UpperCAmelCase = torch.tensor(self.get_from_offsets(_lowercase , '''start_time''' ) )
__UpperCAmelCase = torch.tensor(self.get_from_offsets(_lowercase , '''end_time''' ) )
# fmt: off
__UpperCAmelCase = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] )
__UpperCAmelCase = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=0.01 ) )
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=0.01 ) )
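# Illustrative helper (not part of the test class above): the slow test
# converts CTC frame offsets to seconds via
#   time_offset = model.config.inputs_to_logits_ratio / sampling_rate.
# The defaults below (ratio 320, rate 16000 -> 0.02 s per logit frame) are the
# usual Wav2Vec2 base settings and are an assumption here, not read from a
# checkpoint.
def offsets_to_times(word_offsets, inputs_to_logits_ratio=320, sampling_rate=16_000):
    time_offset = inputs_to_logits_ratio / sampling_rate
    return [
        {
            "word": d["word"],
            "start_time": d["start_offset"] * time_offset,
            "end_time": d["end_offset"] * time_offset,
        }
        for d in word_offsets
    ]


assert offsets_to_times([{"word": "hi", "start_offset": 50, "end_offset": 75}]) == [
    {"word": "hi", "start_time": 1.0, "end_time": 1.5}
]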
| 49 |
'''simple docstring'''
def lucas_lehmer_test(p: int) -> bool:
    """Return True if the Mersenne number 2**p - 1 is prime (Lucas-Lehmer test)."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
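# Worked example (added for illustration): for p = 5 the Mersenne number is
# m = 2**5 - 1 = 31 and s runs for p - 2 = 3 steps:
#   4 -> (16 - 2) % 31 = 14 -> (196 - 2) % 31 = 8 -> (64 - 2) % 31 = 0.
# s ends at 0, so 31 is prime.
assert lucas_lehmer_test(5) is True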
| 135 | 0 |
def permute(nums: list[int]) -> list[list[int]]:
    """Return all permutations of nums (rotate the head element and recurse)."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums):
    """Return all permutations of nums via in-place swaps and backtracking."""

    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in the permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
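# Cross-check (added for illustration): both strategies above must agree with
# itertools.permutations up to ordering.
from itertools import permutations as it_permutations

expected = sorted(list(p) for p in it_permutations([1, 2, 3]))
assert sorted(permute([1, 2, 3])) == expected
assert sorted(permute2([1, 2, 3])) == expected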
| 591 | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowercase = """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def lowerCamelCase_ ( UpperCamelCase__ : Optional[int]=None ):
'''simple docstring'''
if subparsers is not None:
UpperCamelCase__ = subparsers.add_parser('''tpu-config''', description=_description )
else:
UpperCamelCase__ = argparse.ArgumentParser('''Accelerate tpu-config command''', description=_description )
# Core arguments
UpperCamelCase__ = parser.add_argument_group(
'''Config Arguments''', '''Arguments that can be configured through `accelerate config`.''' )
config_args.add_argument(
'''--config_file''', type=UpperCamelCase__, default=UpperCamelCase__, help='''Path to the config file to use for accelerate.''', )
config_args.add_argument(
'''--tpu_name''', default=UpperCamelCase__, help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''', )
config_args.add_argument(
'''--tpu_zone''', default=UpperCamelCase__, help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''', )
UpperCamelCase__ = parser.add_argument_group('''TPU Arguments''', '''Arguments for options ran inside the TPU.''' )
pod_args.add_argument(
'''--use_alpha''', action='''store_true''', help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''', )
pod_args.add_argument(
'''--command_file''', default=UpperCamelCase__, help='''The path to the file containing the commands to run on the pod on startup.''', )
pod_args.add_argument(
'''--command''', action='''append''', nargs='''+''', help='''A command to run on the pod. Can be passed multiple times.''', )
pod_args.add_argument(
'''--install_accelerate''', action='''store_true''', help='''Whether to install accelerate on the pod. Defaults to False.''', )
pod_args.add_argument(
'''--accelerate_version''', default='''latest''', help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''', )
pod_args.add_argument(
'''--debug''', action='''store_true''', help='''If set, will print the command that would be run instead of running it.''' )
if subparsers is not None:
parser.set_defaults(func=UpperCamelCase__ )
return parser
def lowerCamelCase_ ( UpperCamelCase__ : str ):
'''simple docstring'''
UpperCamelCase__ = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(UpperCamelCase__ ):
UpperCamelCase__ = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCamelCase__ = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCamelCase__ = defaults.commands
if not args.tpu_name:
UpperCamelCase__ = defaults.tpu_name
if not args.tpu_zone:
UpperCamelCase__ = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCamelCase__ = '''git+https://github.com/huggingface/accelerate.git'''
elif args.accelerate_version == "latest":
UpperCamelCase__ = '''accelerate -U'''
elif isinstance(parse(args.accelerate_version ), UpperCamelCase__ ):
UpperCamelCase__ = F"""accelerate=={args.accelerate_version}"""
if not args.command_file and not args.command:
raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
if args.command_file:
with open(args.command_file, '''r''' ) as f:
UpperCamelCase__ = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0], UpperCamelCase__ ):
UpperCamelCase__ = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCamelCase__ = ['''cd /usr/share''']
if args.install_accelerate:
new_cmd += [F"""pip install {args.accelerate_version}"""]
new_cmd += args.command
UpperCamelCase__ = '''; '''.join(UpperCamelCase__ )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCamelCase__ = ['''gcloud''']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F"""Running {" ".join(UpperCamelCase__ )}""" )
return
subprocess.run(UpperCamelCase__ )
print('''Successfully setup pod.''' )
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCamelCase__ = tpu_command_parser()
UpperCamelCase__ = parser.parse_args()
tpu_command_launcher(UpperCamelCase__ )
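# Illustrative sketch (not part of accelerate): the launcher above folds all
# startup commands into one string for `gcloud ... ssh --command`, always
# prefixed with `cd /usr/share`. The command list below is made up.
example_commands = ["cd /usr/share", "pip install accelerate -U", "accelerate launch train.py"]
example_command_arg = "; ".join(example_commands)
assert example_command_arg == "cd /usr/share; pip install accelerate -U; accelerate launch train.py"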
| 591 | 1 |
def method_1(boundary, steps):
    """Extended trapezoidal rule: int(f) ~= h/2 * (f(a) + 2*f(x_1) + ... + f(b))."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")
if __name__ == "__main__":
main()
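# Worked check (added for illustration): for f(x) = x**2 on [0, 1] with 10
# steps, h = 0.1 and the rule evaluates
#   h/2 * (f(0) + f(1)) + h * (f(0.1) + ... + f(0.9)) = 0.05 + 0.285 = 0.335,
# close to the exact integral 1/3. Note that make_points relies on the
# floating-point drift of repeated `x + h` to emit the last interior point.
assert abs(method_1([0.0, 1.0], 10.0) - 0.335) < 1e-6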
| 570 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
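# Sanity check (illustrative, not in the original file): the parallel sort
# must agree with Python's built-in sorted(). Guarded so it stays safe under
# the "spawn" start method; note the exchange loop above is hard-coded to 10
# rounds, so lists longer than 10 elements would not be fully sorted.
if __name__ == "__main__":
    data = [3, 1, 4, 1, 5, 9, 2, 6]
    assert odd_even_transposition(list(data)) == sorted(data)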
| 570 | 1 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 536 | """simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")
        self.assertIsInstance(act, nn.Mish)
        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")
        self.assertIsInstance(act, nn.GELU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
| 536 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
lowerCAmelCase_ = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 39 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
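# Illustrative helper (an assumption about what the shape asserts above
# verify, not part of the test file): GLPN makes each spatial dimension a
# multiple of `size_divisor` by rounding down.
def round_down_to_multiple(dim: int, size_divisor: int = 32) -> int:
    return (dim // size_divisor) * size_divisor


assert round_down_to_multiple(400) == 384
assert round_down_to_multiple(400) % 32 == 0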
| 211 | 0 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length using secrets."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    """Passwords should contain UPPERCASE, lowercase, numbers, and special characters."""
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input(
        "Please indicate the characters that must be in your password: "
    ).strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, You better save it.]")
if __name__ == "__main__":
main()
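# Quick demonstration (added for illustration) of the strength check above:
assert is_strong_password("Aa1!aaaa") is True  # upper, lower, digit, special, length 8
assert is_strong_password("aaaaaaaa") is False  # no uppercase, digit, or special char
assert is_strong_password("Aa1!") is False  # shorter than min_length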
| 481 |
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class _UpperCAmelCase :
def __init__( self , a__ ):
if isinstance(a__ , a__ ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
A_ : Optional[Any] = deepcopy(a__ )
elif os.path.exists(a__ ):
with io.open(a__ , """r""" , encoding="""utf-8""" ) as f:
A_ : str = json.load(a__ )
else:
try:
A_ : Dict = baseaa.urlsafe_baadecode(a__ ).decode("""utf-8""" )
A_ : List[Any] = json.loads(a__ )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
F"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
A_ : Any = config
self.set_stage_and_offload()
def _lowerCamelCase ( self ):
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
A_ : List[str] = self.get_value("""zero_optimization.stage""" , -1 )
# offload
A_ : Any = False
if self.is_zeroa() or self.is_zeroa():
A_ : Optional[int] = set(["""cpu""", """nvme"""] )
A_ : Dict = set(
[
self.get_value("""zero_optimization.offload_optimizer.device""" ),
self.get_value("""zero_optimization.offload_param.device""" ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
A_ : Tuple = True
def _lowerCamelCase ( self , a__ ):
A_ : List[Any] = self.config
# find the config node of interest if it exists
A_ : Optional[Any] = ds_key_long.split(""".""" )
A_ : Union[str, Any] = nodes.pop()
for node in nodes:
A_ : List[str] = config.get(a__ )
if config is None:
return None, ds_key
return config, ds_key
def _lowerCamelCase ( self , a__ , a__=None ):
A_ , A_ : Union[str, Any] = self.find_config_node(a__ )
if config is None:
return default
return config.get(a__ , a__ )
def _lowerCamelCase ( self , a__ , a__=False ):
A_ : Union[str, Any] = self.config
# find the config node of interest if it exists
A_ : str = ds_key_long.split(""".""" )
for node in nodes:
A_ : int = config
A_ : int = config.get(a__ )
if config is None:
if must_exist:
raise ValueError(F"""Can't find {ds_key_long} entry in the config: {self.config}""" )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(a__ )
def _lowerCamelCase ( self , a__ ):
A_ : Optional[Any] = self.get_value(a__ )
return False if value is None else bool(a__ )
def _lowerCamelCase ( self , a__ ):
A_ : Optional[Any] = self.get_value(a__ )
return False if value is None else not bool(a__ )
def _lowerCamelCase ( self ):
return self._stage == 2
def _lowerCamelCase ( self ):
return self._stage == 3
def _lowerCamelCase ( self ):
return self._offload
class _UpperCAmelCase :
def __init__( self , a__ ):
A_ : Any = engine
def _lowerCamelCase ( self , a__ , **a__ ):
# runs backpropagation and handles mixed precision
self.engine.backward(a__ , **a__ )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class _UpperCAmelCase ( _lowerCamelCase ):
def __init__( self , a__ ):
super().__init__(a__ , device_placement=a__ , scaler=a__ )
A_ : Dict = hasattr(self.optimizer , """overflow""" )
def _lowerCamelCase ( self , a__=None ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def _lowerCamelCase ( self ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def _lowerCamelCase ( self ):
if self.__has_overflow__:
return self.optimizer.overflow
return False
class _UpperCAmelCase ( _lowerCamelCase ):
def __init__( self , a__ , a__ ):
super().__init__(a__ , a__ )
def _lowerCamelCase ( self ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class _UpperCAmelCase :
def __init__( self , a__ , a__=0.001 , a__=0 , **a__ ):
A_ : List[str] = params
A_ : Any = lr
A_ : int = weight_decay
A_ : Optional[int] = kwargs
class _UpperCAmelCase :
def __init__( self , a__ , a__=None , a__=0 , **a__ ):
A_ : Union[str, Any] = optimizer
A_ : int = total_num_steps
A_ : Any = warmup_num_steps
A_ : int = kwargs
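# Self-contained restatement (for illustration) of the dotted-path lookup that
# find_config_node/get_value above implement: "zero_optimization.stage" walks
# the nested config dict one key at a time.
def demo_get_value(config: dict, ds_key_long: str, default=None):
    nodes = ds_key_long.split(".")
    ds_key = nodes.pop()
    for node in nodes:
        config = config.get(node)
        if config is None:
            return default
    return config.get(ds_key, default)


demo_config = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
assert demo_get_value(demo_config, "zero_optimization.stage") == 3
assert demo_get_value(demo_config, "zero_optimization.offload_param.device") == "cpu"
assert demo_get_value(demo_config, "optimizer.params.lr") is None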
| 481 | 1 |
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Build a state space tree, printing one permutation at every leaf."""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a) | 6 |
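# Quick property check (illustrative): the state space tree above visits
# exactly n! leaves. A small counting variant is sketched here because the
# original prints each permutation instead of returning it.
def count_permutation_leaves(sequence: list) -> int:
    count = 0

    def walk(depth: int, used: list) -> None:
        nonlocal count
        if depth == len(sequence):
            count += 1
            return
        for i in range(len(sequence)):
            if not used[i]:
                used[i] = True
                walk(depth + 1, used)
                used[i] = False

    walk(0, [False] * len(sequence))
    return count


assert count_permutation_leaves([3, 1, 2, 4]) == 24  # 4! leaves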
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
__A : int = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def UpperCamelCase_ ( A__ : Union[str, Any] , A__ : Tuple ):
'''simple docstring'''
inspect_dataset(A__ , A__ )
lowerCAmelCase_ : Any = path + """.py"""
assert script_name in os.listdir(A__ )
assert "__pycache__" not in os.listdir(A__ )
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def UpperCamelCase_ ( A__ : Tuple , A__ : str ):
'''simple docstring'''
inspect_metric(A__ , A__ )
lowerCAmelCase_ : Any = path + """.py"""
assert script_name in os.listdir(A__ )
assert "__pycache__" not in os.listdir(A__ )
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def UpperCamelCase_ ( A__ : List[str] , A__ : List[str] , A__ : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = get_dataset_config_info(A__ , config_name=A__ )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def UpperCamelCase_ ( A__ : List[str] , A__ : List[str] , A__ : int ):
'''simple docstring'''
with pytest.raises(A__ ):
get_dataset_config_info(A__ , config_name=A__ )
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def UpperCamelCase_ ( A__ : str , A__ : List[str] ):
'''simple docstring'''
lowerCAmelCase_ : Dict = get_dataset_config_names(A__ )
assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def UpperCamelCase_ ( A__ : List[Any] , A__ : Dict , A__ : Optional[int] ):
'''simple docstring'''
lowerCAmelCase_ : Tuple = get_dataset_infos(A__ )
assert list(infos.keys() ) == expected_configs
lowerCAmelCase_ : Optional[int] = expected_configs[0]
assert expected_config in infos
lowerCAmelCase_ : int = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def UpperCamelCase_ ( A__ : List[str] , A__ : Optional[Any] , A__ : List[Any] ):
'''simple docstring'''
lowerCAmelCase_ : Dict = get_dataset_infos(A__ )
assert expected_config in infos
lowerCAmelCase_ : List[Any] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def UpperCamelCase_ ( A__ : List[str] , A__ : Any , A__ : Dict ):
'''simple docstring'''
with pytest.raises(A__ ):
get_dataset_split_names(A__ , config_name=A__ )
| 275 | 0 |
import string
def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )


def benchmark() -> None:
    """Benchmark both implementations side by side."""
    from timeit import timeit

    print("Running performance benchmarks...")
    bench = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=bench)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=bench)} seconds")
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
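# Property check (added for illustration): atbash is an involution, so
# applying it twice returns the original text, and both implementations agree.
assert atbash(atbash("Hello, World!")) == "Hello, World!"
assert atbash("abc") == atbash_slow("abc") == "zyx"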
| 202 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of `function` from `starting_point` by the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
F"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
F"""{newton_raphson("exp(x) - 1", 10, precision=0.0_05)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 202 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = TFCamembertModel.from_pretrained('jplu/tf-camembert-base' )
SCREAMING_SNAKE_CASE_ = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )['last_hidden_state']
SCREAMING_SNAKE_CASE_ = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , _lowerCAmelCase )
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE_ = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) ) | 31 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input(
        "Please indicate the characters that must be in your password: "
    ).strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, You better save it.]")
if __name__ == "__main__":
main()
| 151 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase : Any = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
__lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
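# Simplified illustration (not transformers' actual _LazyModule) of the lazy
# import pattern used above: attribute access triggers the submodule import,
# so heavy optional dependencies are only paid for when actually used.
import importlib
import types


class DemoLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, name):
        if name not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        module = importlib.import_module("." + self._symbol_to_module[name], self.__name__)
        return getattr(module, name)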
| 662 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = LongformerTokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] = True
SCREAMING_SNAKE_CASE_ : Optional[int] = LongformerTokenizerFast
SCREAMING_SNAKE_CASE_ : str = True
def __UpperCAmelCase ( self : Optional[int] ) -> str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
a = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
a = {"unk_token": "<unk>"}
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__lowerCamelCase ) )
def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Dict ) -> Any:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] , **__lowerCamelCase : Any ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : List[Any] ) -> Union[str, Any]:
a = "lower newer"
a = "lower newer"
return input_text, output_text
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
a = "lower newer"
a = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
a = tokenizer.tokenize(__lowerCamelCase ) # , add_prefix_space=True)
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
a = tokens + [tokenizer.unk_token]
a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
a = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
a = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCAmelCase ( self : Any ) -> str:
a = self.get_tokenizer()
a = "Encode this sequence."
a = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
# Testing spaces after special tokens
a = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase )} ) # mask token has a left space
a = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
a = "Encode <mask> sequence"
a = "Encode <mask>sequence"
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : str ) -> List[str]:
pass
def __UpperCAmelCase ( self : int ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = "A, <mask> AllenNLP sentence."
a = tokenizer_r.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
a = tokenizer_p.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
a = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
a = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
a = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
a = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["trim_offsets"] , __lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
a = f"""{text_of_1_token} {text_of_1_token}"""
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ) + 1, 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
| 662 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all MVP models at https://huggingface.co/models?filter=mvp
lowerCamelCase = {
"""vocab_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json""",
},
"""added_tokens.json""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json""",
},
"""merges_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json""",
},
}
lowerCamelCase = {
"""RUCAIBox/mvp""": 1_024,
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ['''input_ids''', '''attention_mask''']
UpperCamelCase = MvpTokenizer
def __init__( self : List[str] , _UpperCAmelCase : Dict=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : Any="replace" , _UpperCAmelCase : int="<s>" , _UpperCAmelCase : Any="</s>" , _UpperCAmelCase : Tuple="</s>" , _UpperCAmelCase : List[Any]="<s>" , _UpperCAmelCase : Any="<unk>" , _UpperCAmelCase : Any="<pad>" , _UpperCAmelCase : Optional[Any]="<mask>" , _UpperCAmelCase : Tuple=False , _UpperCAmelCase : str=True , **_UpperCAmelCase : Dict , ) -> Tuple:
'''simple docstring'''
super().__init__(
_UpperCAmelCase , _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , errors=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase , **_UpperCAmelCase , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , _UpperCAmelCase ) != add_prefix_space:
UpperCAmelCase_ = getattr(_UpperCAmelCase , pre_tok_state.pop("type" ) )
UpperCAmelCase_ = add_prefix_space
UpperCAmelCase_ = pre_tok_class(**_UpperCAmelCase )
UpperCAmelCase_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
UpperCAmelCase_ = "post_processor"
UpperCAmelCase_ = getattr(self.backend_tokenizer , _UpperCAmelCase , _UpperCAmelCase )
if tokenizer_component_instance:
UpperCAmelCase_ = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
UpperCAmelCase_ = tuple(state["sep"] )
if "cls" in state:
UpperCAmelCase_ = tuple(state["cls"] )
UpperCAmelCase_ = False
if state.get("add_prefix_space" , _UpperCAmelCase ) != add_prefix_space:
UpperCAmelCase_ = add_prefix_space
UpperCAmelCase_ = True
if state.get("trim_offsets" , _UpperCAmelCase ) != trim_offsets:
UpperCAmelCase_ = trim_offsets
UpperCAmelCase_ = True
if changes_to_apply:
UpperCAmelCase_ = getattr(_UpperCAmelCase , state.pop("type" ) )
UpperCAmelCase_ = component_class(**_UpperCAmelCase )
setattr(self.backend_tokenizer , _UpperCAmelCase , _UpperCAmelCase )
    @property
    def mask_token( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
    @mask_token.setter
    def mask_token( self , value ):
        '''simple docstring'''
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs." )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs." )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
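    # Hedged illustration (not part of the original file): the two methods above
    # give MVP the BART-style special-token layout. With assumed ids bos=0, eos=2:
    #   single sequence: <s> A </s>              -> [0, *A, 2]
    #   sequence pair:   <s> A </s> </s> B </s>  -> [0, *A, 2, 2, *B, 2]
    # and the token-type method above returns all zeros in both cases.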
| 82 |
'''simple docstring'''
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
"""simple docstring"""
__a = XCLIPTextConfig()
# derive patch size from model name
__a = model_name.find("""patch""" )
__a = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] )
__a = XCLIPVisionConfig(patch_size=__SCREAMING_SNAKE_CASE , num_frames=__SCREAMING_SNAKE_CASE )
if "large" in model_name:
__a = 768
__a = 3072
__a = 12
__a = 1024
__a = 4096
__a = 16
__a = 24
__a = 768
__a = 3072
if model_name == "xclip-large-patch14-16-frames":
__a = 336
__a = XCLIPConfig.from_text_vision_configs(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if "large" in model_name:
__a = 768
return config
def rename_key(name):
    """Map an original X-CLIP parameter name onto the HF module hierarchy."""
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
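# Hedged sanity checks (not in the original script) for the mapping above.
assert rename_key("visual.conv1.weight") == "vision_model.embeddings.patch_embedding.weight"
assert rename_key("ln_final.bias") == "text_model.final_layer_norm.bias"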
def convert_state_dict(orig_state_dict, config):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__a = orig_state_dict.pop(__SCREAMING_SNAKE_CASE )
if "attn.in_proj" in key:
__a = key.split(""".""" )
if key.startswith("""visual""" ):
__a = key_split[3]
__a = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__a = val[
:dim, :
]
__a = val[
dim : dim * 2, :
]
__a = val[
-dim:, :
]
else:
__a = val[
:dim
]
__a = val[
dim : dim * 2
]
__a = val[
-dim:
]
else:
if "weight" in key:
__a = val[
:dim, :
]
__a = val[
dim : dim * 2, :
]
__a = val[
-dim:, :
]
else:
__a = val[:dim]
__a = val[
dim : dim * 2
]
__a = val[-dim:]
elif key.startswith("""mit""" ):
__a = key_split[2]
__a = config.vision_config.mit_hidden_size
if "weight" in key:
__a = val[:dim, :]
__a = val[dim : dim * 2, :]
__a = val[-dim:, :]
else:
__a = val[:dim]
__a = val[dim : dim * 2]
__a = val[-dim:]
else:
__a = key_split[2]
__a = config.text_config.hidden_size
if "weight" in key:
__a = val[:dim, :]
__a = val[
dim : dim * 2, :
]
__a = val[-dim:, :]
else:
__a = val[:dim]
__a = val[
dim : dim * 2
]
__a = val[-dim:]
else:
__a = rename_key(__SCREAMING_SNAKE_CASE )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
__a = val.T
__a = val
return orig_state_dict
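# Hedged illustration (not part of the original script): the "attn.in_proj"
# branches above split a fused (3*dim, dim) QKV matrix row-wise into equal
# query/key/value blocks; the toy shapes below are illustrative only.
_demo = np.zeros((3 * 4, 4))
_q, _k, _v = _demo[:4, :], _demo[4 : 2 * 4, :], _demo[-4:, :]
assert _q.shape == _k.shape == _v.shape == (4, 4)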
def prepare_video(num_frames):
"""simple docstring"""
if num_frames == 8:
__a = """eating_spaghetti_8_frames.npy"""
elif num_frames == 16:
__a = """eating_spaghetti.npy"""
elif num_frames == 32:
__a = """eating_spaghetti_32_frames.npy"""
__a = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename=__SCREAMING_SNAKE_CASE , repo_type="""dataset""" , )
__a = np.load(__SCREAMING_SNAKE_CASE )
return list(__SCREAMING_SNAKE_CASE )
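# Note (added): prepare_video downloads a pre-extracted frame array for the demo
# video from the hub dataset and returns it as a list of per-frame numpy arrays,
# which is the input format the processor's `videos` argument expects below.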
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
"""simple docstring"""
__a = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
__a = model_to_url[model_name]
__a = 8
if "16-frames" in model_name:
__a = 16
elif "shot" in model_name:
__a = 32
__a = get_xclip_config(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__a = XCLIPModel(__SCREAMING_SNAKE_CASE )
model.eval()
if "drive" in checkpoint_url:
__a = """pytorch_model.bin"""
gdown.cached_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , quiet=__SCREAMING_SNAKE_CASE )
__a = torch.load(__SCREAMING_SNAKE_CASE , map_location="""cpu""" )["""model"""]
else:
__a = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE )["""model"""]
__a = convert_state_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__a = XCLIPModel(__SCREAMING_SNAKE_CASE )
__a , __a = model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
__a = 336 if model_name == """xclip-large-patch14-16-frames""" else 224
__a = VideoMAEImageProcessor(size=__SCREAMING_SNAKE_CASE )
__a = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
__a = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
__a = XCLIPProcessor(image_processor=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE )
__a = prepare_video(__SCREAMING_SNAKE_CASE )
__a = processor(
text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE )
print("""Shape of pixel values:""" , inputs.pixel_values.shape )
with torch.no_grad():
__a = model(**__SCREAMING_SNAKE_CASE )
# Verify outputs
__a = outputs.logits_per_video
__a = logits_per_video.softmax(dim=1 )
print("""Probs:""" , __SCREAMING_SNAKE_CASE )
# kinetics-400
if model_name == "xclip-base-patch32":
__a = torch.tensor([[0.0_019, 0.9_951, 0.0_030]] )
elif model_name == "xclip-base-patch32-16-frames":
__a = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] )
elif model_name == "xclip-base-patch16":
__a = torch.tensor([[0.0_083, 0.9_681, 0.0_236]] )
elif model_name == "xclip-base-patch16-16-frames":
__a = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] )
elif model_name == "xclip-large-patch14":
__a = torch.tensor([[0.0_062, 0.9_864, 0.0_075]] )
elif model_name == "xclip-large-patch14-16-frames":
__a = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__a = torch.tensor([[0.0_555, 0.8_914, 0.0_531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__a = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
__a = torch.tensor([[0.0_036, 0.9_920, 0.0_045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__a = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__a = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__a = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__a = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
__a = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
__a = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
__a = torch.tensor([[0.0_027, 0.9_904, 0.0_070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
__a = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__a = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
assert torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
if push_to_hub:
print("""Pushing model, processor and slow tokenizer files to the hub...""" )
model.push_to_hub(__SCREAMING_SNAKE_CASE , organization="""nielsr""" )
processor.push_to_hub(__SCREAMING_SNAKE_CASE , organization="""nielsr""" )
slow_tokenizer.push_to_hub(__SCREAMING_SNAKE_CASE , organization="""nielsr""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='xclip-base-patch32',
type=str,
help='Name of the model.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 582 | 0 |
"""simple docstring"""
import random
def _partition(data, pivot) -> tuple:
    """Three-way partition of data around pivot: (less, equal, greater)."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items, index) -> int:
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
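# Hedged usage sketch (not part of the original file): quick_select returns the
# index-th smallest element in expected O(n) time without fully sorting.
if __name__ == "__main__":
    data = [2, 4, 5, 7, 899, 54, 32]  # sorted: [2, 4, 5, 7, 32, 54, 899]
    assert quick_select(data, len(data) // 2) == 7  # the median
    assert quick_select(data, 0) == 2  # the minimum
    assert quick_select(data, len(data)) is None  # out-of-range index returns None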
| 109 |
"""simple docstring"""
import re
def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


# The public names below are inferred from behavior; they are never called in this file.
def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
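# Hedged usage sketch (not part of the original file; the public names above are
# inferred from behavior):
assert to_simple_case("one two 31235three4four") == "OneTwo31235three4four"
assert to_snake_case("one two 31235three4four", True) == "ONE_TWO_31235THREE4FOUR"
assert to_kebab_case("one two 31235three4four", False) == "one-two-31235three4four"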
if __name__ == "__main__":
__import__('doctest').testmod()
| 109 | 1 |
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=2 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=36 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=37 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=6 , _lowerCAmelCase=6 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , _lowerCAmelCase=1000 , ) -> Any:
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = image_size
_lowerCAmelCase = patch_size
_lowerCAmelCase = text_seq_length
_lowerCAmelCase = is_training
_lowerCAmelCase = use_input_mask
_lowerCAmelCase = use_token_type_ids
_lowerCAmelCase = use_labels
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = coordinate_size
_lowerCAmelCase = shape_size
_lowerCAmelCase = num_labels
_lowerCAmelCase = num_choices
_lowerCAmelCase = scope
_lowerCAmelCase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_lowerCAmelCase = text_seq_length
_lowerCAmelCase = (image_size // patch_size) ** 2 + 1
_lowerCAmelCase = self.text_seq_length + self.image_seq_length
def _snake_case ( self ) -> Dict:
_lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowerCAmelCase = bbox[i, j, 3]
_lowerCAmelCase = bbox[i, j, 1]
_lowerCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowerCAmelCase = bbox[i, j, 2]
_lowerCAmelCase = bbox[i, j, 0]
_lowerCAmelCase = t
_lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase = None
if self.use_input_mask:
_lowerCAmelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
_lowerCAmelCase = None
if self.use_token_type_ids:
_lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_lowerCAmelCase = None
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_lowerCAmelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
_lowerCAmelCase = LayoutLMvaModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
# text + image
_lowerCAmelCase = model(_lowerCAmelCase , pixel_values=_lowerCAmelCase )
_lowerCAmelCase = model(
_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
_lowerCAmelCase = model(_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
_lowerCAmelCase = model(_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_lowerCAmelCase = model(_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_lowerCAmelCase = model(pixel_values=_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = LayoutLMvaForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(
_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = LayoutLMvaForTokenClassification(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(
_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]:
_lowerCAmelCase = LayoutLMvaForQuestionAnswering(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(
_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self ) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__lowerCamelCase : Optional[int] = False
__lowerCamelCase : Tuple = False
__lowerCamelCase : Dict = False
__lowerCamelCase : Tuple = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__lowerCamelCase : Union[str, Any] = (
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def _snake_case ( self ) -> Any:
_lowerCAmelCase = LayoutLMvaModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> Dict:
_lowerCAmelCase = copy.deepcopy(_lowerCAmelCase )
if model_class in get_values(_lowerCAmelCase ):
_lowerCAmelCase = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(_lowerCAmelCase , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_lowerCAmelCase ):
_lowerCAmelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase )
elif model_class in get_values(_lowerCAmelCase ):
_lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase )
_lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase )
elif model_class in [
*get_values(_lowerCAmelCase ),
]:
_lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase )
elif model_class in [
*get_values(_lowerCAmelCase ),
]:
_lowerCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=_lowerCAmelCase , )
return inputs_dict
def _snake_case ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def _snake_case ( self ) -> List[Any]:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _snake_case ( self ) -> Optional[Any]:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCAmelCase = type
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _snake_case ( self ) -> Tuple:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase )
def _snake_case ( self ) -> str:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase )
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase )
@slow
def _snake_case ( self ) -> List[str]:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = LayoutLMvaModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def prepare_img():
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
def _snake_case ( self ) -> List[str]:
return LayoutLMvaImageProcessor(apply_ocr=_lowerCAmelCase ) if is_vision_available() else None
@slow
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(_lowerCAmelCase )
_lowerCAmelCase = self.default_image_processor
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=_lowerCAmelCase , return_tensors="pt" ).pixel_values.to(_lowerCAmelCase )
_lowerCAmelCase = torch.tensor([[1, 2]] )
_lowerCAmelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
_lowerCAmelCase = model(
input_ids=input_ids.to(_lowerCAmelCase ) , bbox=bbox.to(_lowerCAmelCase ) , pixel_values=pixel_values.to(_lowerCAmelCase ) , )
# verify the logits
_lowerCAmelCase = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , _lowerCAmelCase )
_lowerCAmelCase = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=1E-4 ) )
| 18 |
def solution(limit: int = 1_000_000) -> int:
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
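# Hedged cross-check (not in the original file): the inner loops implement
# Euler's product phi(n) = n * prod_{p | n} (1 - 1/p), and phi(n) counts the
# 1 <= k <= n with gcd(k, n) == 1, so it can be spot-checked by brute force.
from math import gcd

assert sum(1 for k in range(1, 13) if gcd(k, 12) == 1) == 4  # phi(12) = 4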
if __name__ == "__main__":
print(f"""{solution() = }""")
| 248 | 0 |
"""simple docstring"""
def circle_sort(collection: list) -> list:
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection
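# Hedged usage sketch (not part of the original file): circle_sort sorts the
# list in place and also returns it.
assert circle_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert circle_sort([]) == []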
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
| 628 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
SCREAMING_SNAKE_CASE_ = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
SCREAMING_SNAKE_CASE_ = True if '''large''' in model_name or '''huge''' in model_name else False
SCREAMING_SNAKE_CASE_ = True if '''large''' in model_name or '''huge''' in model_name else False
SCREAMING_SNAKE_CASE_ = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
SCREAMING_SNAKE_CASE_ = [3, 3, 3, 3]
SCREAMING_SNAKE_CASE_ = [5, 5, 5, 5]
elif "fl4" in model_name:
SCREAMING_SNAKE_CASE_ = [4, 4, 4, 4]
SCREAMING_SNAKE_CASE_ = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
SCREAMING_SNAKE_CASE_ = [3, 3, 3, 3]
if "lrf" in model_name:
SCREAMING_SNAKE_CASE_ = [3, 3, 3, 3]
else:
SCREAMING_SNAKE_CASE_ = [2, 2, 2, 2]
if "tiny" in model_name:
SCREAMING_SNAKE_CASE_ = 96
elif "small" in model_name:
SCREAMING_SNAKE_CASE_ = 96
elif "base" in model_name:
SCREAMING_SNAKE_CASE_ = 128
elif "large" in model_name:
SCREAMING_SNAKE_CASE_ = 192
elif "xlarge" in model_name:
SCREAMING_SNAKE_CASE_ = 256
elif "huge" in model_name:
SCREAMING_SNAKE_CASE_ = 352
# set label information
SCREAMING_SNAKE_CASE_ = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
SCREAMING_SNAKE_CASE_ = '''imagenet-22k-id2label.json'''
else:
SCREAMING_SNAKE_CASE_ = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(__a, __a, repo_type='''dataset''' ), '''r''' ) )
SCREAMING_SNAKE_CASE_ = {int(__a ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = FocalNetConfig(
embed_dim=__a, depths=__a, focal_levels=__a, focal_windows=__a, use_conv_embed=__a, idalabel=__a, labelaid=__a, use_post_layernorm=__a, use_layerscale=__a, )
return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
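# Hedged sanity checks (not in the original script) for the renaming above.
assert rename_key("patch_embed.proj.weight") == "focalnet.embeddings.patch_embeddings.projection.weight"
assert rename_key("norm.weight") == "focalnet.layernorm.weight"
assert rename_key("head.bias") == "classifier.bias"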
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
# fmt: off
SCREAMING_SNAKE_CASE_ = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
SCREAMING_SNAKE_CASE_ = model_name_to_url[model_name]
print('''Checkpoint URL: ''', __a )
SCREAMING_SNAKE_CASE_ = torch.hub.load_state_dict_from_url(__a, map_location='''cpu''' )['''model''']
# rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
SCREAMING_SNAKE_CASE_ = get_focalnet_config(__a )
SCREAMING_SNAKE_CASE_ = FocalNetForImageClassification(__a )
model.eval()
# load state dict
model.load_state_dict(__a )
# verify conversion
SCREAMING_SNAKE_CASE_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE_ = BitImageProcessor(
do_resize=__a, size={'''shortest_edge''': 256}, resample=PILImageResampling.BILINEAR, do_center_crop=__a, crop_size=224, do_normalize=__a, image_mean=__a, image_std=__a, )
SCREAMING_SNAKE_CASE_ = Image.open(requests.get(__a, stream=__a ).raw )
SCREAMING_SNAKE_CASE_ = processor(images=__a, return_tensors='''pt''' )
SCREAMING_SNAKE_CASE_ = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6], std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
SCREAMING_SNAKE_CASE_ = image_transforms(__a ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values, __a, atol=1E-4 )
SCREAMING_SNAKE_CASE_ = model(**__a )
SCREAMING_SNAKE_CASE_ = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''', model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''', outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
SCREAMING_SNAKE_CASE_ = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
elif model_name == "focalnet-tiny-lrf":
SCREAMING_SNAKE_CASE_ = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
elif model_name == "focalnet-small":
SCREAMING_SNAKE_CASE_ = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
elif model_name == "focalnet-small-lrf":
SCREAMING_SNAKE_CASE_ = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
elif model_name == "focalnet-base":
SCREAMING_SNAKE_CASE_ = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
elif model_name == "focalnet-base-lrf":
SCREAMING_SNAKE_CASE_ = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
assert torch.allclose(outputs.logits[0, :3], __a, atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'Saving model and processor of {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__a )
processor.save_pretrained(__a )
if push_to_hub:
print(F'Pushing model and processor of {model_name} to the hub...' )
model.push_to_hub(F'{model_name}' )
processor.push_to_hub(F'{model_name}' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
lowerCAmelCase__ = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 628 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A_ ( ProcessorMixin ):
_A :List[Any] = ['''image_processor''', '''tokenizer''']
_A :List[str] = '''CLIPImageProcessor'''
_A :int = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["""pixel_values"""] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
@property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
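    # Hedged usage sketch (not part of the original file; the checkpoint name
    # below is illustrative only):
    # processor = A_.from_pretrained("some/altclip-style-checkpoint")
    # batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
    # -> a BatchEncoding holding input_ids/attention_mask from the XLM-R tokenizer
    #    plus pixel_values from the CLIP image processor.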
| 428 |
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
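# Complexity note (added): _build_tree visits each index once (O(n)); update
# follows a single root-to-leaf path and query_range halves the interval per
# level via node.mid, so both run in O(log n) on the balanced tree built above.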
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 354 | 0 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    """Compare got_ver to want_ver under op, raising if the check fails."""
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None):
    """Perform a runtime check of a dependency version, using the same syntax as pip."""
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
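# Hedged usage sketch (not part of the original module):
# require_version("tokenizers>=0.11.1,!=0.11.3,<0.13")  # range with several ops
# require_version("numpy")                              # presence-only check
# require_version("python>=3.7")                        # compares sys.version_info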
| 86 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 86 | 1 |
'''simple docstring'''
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num
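# Hedged sanity checks (not in the original file): 2**15 = 32768, whose digits
# sum to 3 + 2 + 7 + 6 + 8 = 26; 2**4 = 16, whose digits sum to 7.
assert solution(15) == 26
assert solution(4) == 7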
if __name__ == "__main__":
    power = int(input('Enter the power of 2: ').strip())
    print('2 ^ ', power, ' = ', 2**power)
    result = solution(power)
    print('Sum of the digits is: ', result)
| 620 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCamelCase__ = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
}
UpperCamelCase__ = {
'facebook/bart-base': 1024,
'facebook/bart-large': 1024,
'facebook/bart-large-mnli': 1024,
'facebook/bart-large-cnn': 1024,
'facebook/bart-large-xsum': 1024,
'yjernite/bart_eli5': 1024,
}
@lru_cache()
def bytes_to_unicode():
    """Return a reversible mapping from utf-8 byte values to printable unicode strings."""
    bs = (
        list(range(ord("!") , ord("~") + 1 ) ) + list(range(ord("¡") , ord("¬") + 1 ) ) + list(range(ord("®") , ord("ÿ") + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
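# Note (added): the mapping above covers all 256 byte values and is reversible,
# so BPE can run over printable unicode text and decode back to exact bytes
# without ever touching raw whitespace or control characters.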
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
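# Hedged check (not in the original file): adjacent-symbol pairs of a word tuple.
assert get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}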
class _UpperCAmelCase ( PreTrainedTokenizer ):
__lowerCamelCase: Dict = VOCAB_FILES_NAMES
__lowerCamelCase: int = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase: Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase: str = ['input_ids', 'attention_mask']
def __init__( self : Tuple , a : Tuple , a : Tuple , a : int="replace" , a : Optional[int]="<s>" , a : Tuple="</s>" , a : Tuple="</s>" , a : Tuple="<s>" , a : Optional[Any]="<unk>" , a : Dict="<pad>" , a : List[str]="<mask>" , a : Tuple=False , **a : Optional[int] , ):
'''simple docstring'''
lowercase_ : List[Any] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else bos_token
lowercase_ : str = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else eos_token
lowercase_ : str = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else sep_token
lowercase_ : Any = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else cls_token
lowercase_ : Dict = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else unk_token
lowercase_ : List[Any] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase_ : Union[str, Any] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
super().__init__(
errors=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , add_prefix_space=a , **a , )
with open(a , encoding="utf-8" ) as vocab_handle:
lowercase_ : Any = json.load(a )
lowercase_ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
lowercase_ : Dict = errors # how to handle errors in decoding
lowercase_ : Any = bytes_to_unicode()
lowercase_ : List[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(a , encoding="utf-8" ) as merges_handle:
lowercase_ : Optional[Any] = merges_handle.read().split("\n" )[1:-1]
lowercase_ : Any = [tuple(merge.split() ) for merge in bpe_merges]
lowercase_ : List[str] = dict(zip(a , range(len(a ) ) ) )
lowercase_ : Optional[Any] = {}
lowercase_ : Union[str, Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase_ : str = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
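
if __name__ == "__main__":
    # Editor's sketch (added; assumes hub access and is not part of the original
    # file): round-trip a sentence through the tokenizer above. The ids shown in
    # the comments are the expected values for the shared BART/RoBERTa vocab.
    tok = BartTokenizer.from_pretrained("facebook/bart-base")
    ids = tok("Hello world")["input_ids"]
    print(ids)  # expected: [0, 31414, 232, 2] -- <s>, "Hello", " world", </s>
    print(tok.decode(ids, skip_special_tokens=True))  # expected: "Hello world"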
| 620 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = None
if self.use_input_mask:
_lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self ):
"""simple docstring"""
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = DistilBertModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
_lowerCAmelCase = model(__magic_name__ , __magic_name__ )
_lowerCAmelCase = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = DistilBertForMaskedLM(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
_lowerCAmelCase = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = DistilBertForQuestionAnswering(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
_lowerCAmelCase = model(
__magic_name__ , attention_mask=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = DistilBertForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
_lowerCAmelCase = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = DistilBertForTokenClassification(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
_lowerCAmelCase = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = self.num_choices
_lowerCAmelCase = DistilBertForMultipleChoice(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
_lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase = model(
__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = config_and_inputs
_lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
UpperCamelCase : str = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
UpperCamelCase : Dict = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase : Optional[int] = True
UpperCamelCase : List[str] = True
UpperCamelCase : Optional[int] = True
UpperCamelCase : List[Any] = True
    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
def _lowerCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__magic_name__ )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = DistilBertModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@slow
@require_torch_gpu
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
_lowerCAmelCase = True
_lowerCAmelCase = model_class(config=__magic_name__ )
_lowerCAmelCase = self._prepare_for_class(__magic_name__ , __magic_name__ )
_lowerCAmelCase = torch.jit.trace(
__magic_name__ , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__magic_name__ , os.path.join(__magic_name__ , 'traced_model.pt' ) )
_lowerCAmelCase = torch.jit.load(os.path.join(__magic_name__ , 'traced_model.pt' ) , map_location=__magic_name__ )
loaded(inputs_dict['input_ids'].to(__magic_name__ ) , inputs_dict['attention_mask'].to(__magic_name__ ) )
@require_torch
class __magic_name__ ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = DistilBertModel.from_pretrained('distilbert-base-uncased' )
_lowerCAmelCase = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_lowerCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_lowerCAmelCase = model(__magic_name__ , attention_mask=__magic_name__ )[0]
_lowerCAmelCase = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , __magic_name__ )
_lowerCAmelCase = torch.tensor(
[[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __magic_name__ , atol=1e-4 ) )
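
if __name__ == "__main__":
    # Editor's sketch (added; assumes torch + transformers are installed and is
    # not part of the original test file): the model tester above boils down to
    # this round trip -- build a tiny config, run random ids through the model,
    # and check the output shape (batch, seq_len, dim).
    config = DistilBertConfig(vocab_size=99, dim=32, n_layers=5, n_heads=4, hidden_dim=37)
    model = DistilBertModel(config).eval()
    input_ids = ids_tensor([13, 7], vocab_size=99)
    output = model(input_ids)
    assert output.last_hidden_state.shape == (13, 7, 32)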
| 704 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]

if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
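
# Editor's note (added; not in the original file): with the _LazyModule hook
# above, `from transformers.models.speecht5 import SpeechT5Config` resolves
# lazily -- the torch-heavy modeling file is only imported the first time one
# of its symbols is actually accessed, which keeps `import transformers` cheap.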
| 309 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    """Map a ParlAI parameter name onto the corresponding Hugging Face name."""
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    """Rename layernorm_embedding keys to layer_norm, in place."""
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy, rename and load a ParlAI Blenderbot checkpoint into the Hugging Face format."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
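
# Editor's note (added; not in the original script): tracing one encoder key
# through rename_state_dict_key shows the two-stage mapping:
#     "encoder.layers.0.attention.q_lin.weight"
#       -> PATTERNS substitutions:  "encoder.layers.0.attn.q_proj.weight"
#       -> ".attn" encoder fixup:   "encoder.layers.0.self_attn.q_proj.weight"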
| 532 |
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple('covid_data', 'cases deaths recovered')


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    """Scrape the total COVID-19 cases, deaths and recoveries from worldometers."""
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
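
# Editor's sketch (added; not in the original snippet): the same XPath
# exercised against a static fragment, so the parsing logic can be checked
# without hitting the live site.
_sample = html.fromstring(
    '<div><div class="maincounter-number"><span>1</span></div>'
    '<div class="maincounter-number"><span>2</span></div>'
    '<div class="maincounter-number"><span>3</span></div></div>'
)
assert _sample.xpath('//div[@class = "maincounter-number"]/span/text()') == ["1", "2", "3"]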
| 603 | 0 |
import torch
from diffusers import StableDiffusionPipeline
__UpperCAmelCase = "path-to-your-trained-model"
__UpperCAmelCase = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda")
__UpperCAmelCase = "A photo of sks dog in a bucket"
__UpperCAmelCase = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| 710 |
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first n natural numbers (Project Euler problem 6)."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f'{solution() = }')
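
# Editor's sketch (added; not in the original snippet): the same value via the
# closed forms sum(1..n) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6. The helper
# name below is mine, not the author's.
def solution_closed_form(n: int = 100) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


assert solution_closed_form(10) == solution(10) == 2640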
| 155 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400,
                 do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5],
                 image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> str:
"""simple docstring"""
if not batched:
UpperCamelCase = image_inputs[0]
if isinstance(SCREAMING_SNAKE_CASE , Image.Image ):
UpperCamelCase , UpperCamelCase = image.size
else:
UpperCamelCase , UpperCamelCase = image.shape[1], image.shape[2]
if w < h:
UpperCamelCase = int(self.size["shortest_edge"] * h / w )
UpperCamelCase = self.size["shortest_edge"]
elif w > h:
UpperCamelCase = self.size["shortest_edge"]
UpperCamelCase = int(self.size["shortest_edge"] * w / h )
else:
UpperCamelCase = self.size["shortest_edge"]
UpperCamelCase = self.size["shortest_edge"]
else:
UpperCamelCase = []
for image in image_inputs:
UpperCamelCase , UpperCamelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase = max(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : item[0] )[0]
UpperCamelCase = max(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __UpperCAmelCase ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowercase = DetaImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = DetaImageProcessingTester(self )
@property
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "image_mean" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "image_std" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_normalize" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_resize" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_rescale" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_pad" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "size" ) )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
pass
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE )
UpperCamelCase = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , numpify=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , torchify=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
# prepare image and target
UpperCamelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
UpperCamelCase = json.loads(f.read() )
UpperCamelCase = {"image_id": 39769, "annotations": target}
# encode them
UpperCamelCase = DetaImageProcessor()
UpperCamelCase = image_processing(images=SCREAMING_SNAKE_CASE , annotations=SCREAMING_SNAKE_CASE , return_tensors="pt" )
# verify pixel values
UpperCamelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
UpperCamelCase = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , SCREAMING_SNAKE_CASE ) )
# verify boxes
UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
UpperCamelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , SCREAMING_SNAKE_CASE ) )
# verify is_crowd
UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , SCREAMING_SNAKE_CASE ) )
# verify class_labels
UpperCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , SCREAMING_SNAKE_CASE ) )
# verify orig_size
UpperCamelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , SCREAMING_SNAKE_CASE ) )
# verify size
UpperCamelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , SCREAMING_SNAKE_CASE ) )
@slow
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
# prepare image, target and masks_path
UpperCamelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
UpperCamelCase = json.loads(f.read() )
UpperCamelCase = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
UpperCamelCase = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
UpperCamelCase = DetaImageProcessor(format="coco_panoptic" )
UpperCamelCase = image_processing(images=SCREAMING_SNAKE_CASE , annotations=SCREAMING_SNAKE_CASE , masks_path=SCREAMING_SNAKE_CASE , return_tensors="pt" )
# verify pixel values
UpperCamelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
UpperCamelCase = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , SCREAMING_SNAKE_CASE ) )
# verify boxes
UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
UpperCamelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , SCREAMING_SNAKE_CASE ) )
# verify is_crowd
UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , SCREAMING_SNAKE_CASE ) )
# verify class_labels
UpperCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , SCREAMING_SNAKE_CASE ) )
# verify masks
UpperCamelCase = 822873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , SCREAMING_SNAKE_CASE )
# verify orig_size
UpperCamelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , SCREAMING_SNAKE_CASE ) )
# verify size
UpperCamelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , SCREAMING_SNAKE_CASE ) )
| 606 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
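
# Editor's note (added; not in the original test): why "react" becomes
# "re@@ a@@ c@@ t" with the toy merges above. The word starts as the symbol
# tuple (r, e, a, c, t</w>); the only listed merge that applies is "r e" -> re,
# after which no further merge matches, so the non-final pieces are written
# with the "@@" continuation marker.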
| 606 | 1 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class snake_case_ ( a, unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = XLMRobertaTokenizer
__UpperCamelCase = XLMRobertaTokenizerFast
__UpperCamelCase = True
__UpperCamelCase = True
def __UpperCAmelCase ( self ) -> Tuple:
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase__ ="<pad>"
UpperCAmelCase__ =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ), A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ), A_ )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase__ =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], "<s>" )
self.assertEqual(vocab_keys[1], "<pad>" )
self.assertEqual(vocab_keys[-1], "<mask>" )
self.assertEqual(len(A_ ), 1002 )
def __UpperCAmelCase ( self ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size, 1002 )
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase__ =XLMRobertaTokenizer(A_, keep_accents=A_ )
UpperCAmelCase__ =tokenizer.tokenize("This is a test" )
self.assertListEqual(A_, ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A_ ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
UpperCAmelCase__ =tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
A_, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
], )
UpperCAmelCase__ =tokenizer.convert_tokens_to_ids(A_ )
self.assertListEqual(
A_, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
], )
UpperCAmelCase__ =tokenizer.convert_ids_to_tokens(A_ )
self.assertListEqual(
A_, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
], )
def __UpperCAmelCase ( self ) -> Optional[Any]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCAmelCase__ =(self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase__ =self.rust_tokenizer_class.from_pretrained(A_, **A_ )
UpperCAmelCase__ =self.tokenizer_class.from_pretrained(A_, **A_ )
UpperCAmelCase__ =tempfile.mkdtemp()
UpperCAmelCase__ =tokenizer_r.save_pretrained(A_ )
UpperCAmelCase__ =tokenizer_p.save_pretrained(A_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
UpperCAmelCase__ =tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(A_, A_ )
# Checks everything loads correctly in the same way
UpperCAmelCase__ =tokenizer_r.from_pretrained(A_ )
UpperCAmelCase__ =tokenizer_p.from_pretrained(A_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A_, A_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(A_ )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase__ =tempfile.mkdtemp()
UpperCAmelCase__ =tokenizer_r.save_pretrained(A_, legacy_format=A_ )
UpperCAmelCase__ =tokenizer_p.save_pretrained(A_ )
# Checks it save with the same files
self.assertSequenceEqual(A_, A_ )
# Checks everything loads correctly in the same way
UpperCAmelCase__ =tokenizer_r.from_pretrained(A_ )
UpperCAmelCase__ =tokenizer_p.from_pretrained(A_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A_, A_ ) )
shutil.rmtree(A_ )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase__ =tempfile.mkdtemp()
UpperCAmelCase__ =tokenizer_r.save_pretrained(A_, legacy_format=A_ )
UpperCAmelCase__ =tokenizer_p.save_pretrained(A_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase__ =tokenizer_r.from_pretrained(A_ )
UpperCAmelCase__ =tokenizer_p.from_pretrained(A_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A_, A_ ) )
shutil.rmtree(A_ )
@cached_property
def __UpperCAmelCase ( self ) -> Tuple:
return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base" )
def __UpperCAmelCase ( self ) -> int:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(A_, f.name )
UpperCAmelCase__ =XLMRobertaTokenizer(f.name, keep_accents=A_ )
UpperCAmelCase__ =pickle.dumps(A_ )
pickle.loads(A_ )
def __UpperCAmelCase ( self ) -> List[Any]:
if not self.test_rust_tokenizer:
return
UpperCAmelCase__ =self.get_tokenizer()
UpperCAmelCase__ =self.get_rust_tokenizer()
UpperCAmelCase__ ="I was born in 92000, and this is falsé."
UpperCAmelCase__ =tokenizer.tokenize(A_ )
UpperCAmelCase__ =rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_, A_ )
UpperCAmelCase__ =tokenizer.encode(A_, add_special_tokens=A_ )
UpperCAmelCase__ =rust_tokenizer.encode(A_, add_special_tokens=A_ )
self.assertListEqual(A_, A_ )
UpperCAmelCase__ =self.get_rust_tokenizer()
UpperCAmelCase__ =tokenizer.encode(A_ )
UpperCAmelCase__ =rust_tokenizer.encode(A_ )
self.assertListEqual(A_, A_ )
@slow
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase__ ="Hello World!"
UpperCAmelCase__ =[0, 3_5378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(A_, self.big_tokenizer.encode(A_ ) )
@slow
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase__ =(
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
UpperCAmelCase__ =[
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
17_9459,
12_4850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
1_0114,
711,
152,
20,
6,
5,
2_2376,
642,
1221,
1_5190,
3_4153,
450,
5608,
959,
1119,
5_7702,
136,
186,
47,
1098,
2_9367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
5_0901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(A_, self.big_tokenizer.encode(A_ ) )
@slow
def __UpperCAmelCase ( self ) -> Dict:
# fmt: off
UpperCAmelCase__ ={"input_ids": [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A_, model_name="xlm-roberta-base", revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3", )
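
if __name__ == "__main__":
    # Editor's sketch (added; not in the original test): the
    # `+ tokenizer.fairseq_offset` in the assertions above compensates for the
    # two id layouts -- fairseq reserves four leading ids (<s>=0, <pad>=1,
    # </s>=2, <unk>=3) while the raw SentencePiece model reserves three
    # (<unk>=0, <s>=1, </s>=2), so every piece id shifts by exactly 1.
    fairseq_offset = 1  # mirrors XLMRobertaTokenizer.fairseq_offset
    print([piece_id + fairseq_offset for piece_id in [285, 46, 10, 170, 382]])  # [286, 47, 11, 171, 383]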
| 510 |
from __future__ import annotations
from math import gcd
def pollard_rho(
    num: int,
    seed: int = 2,
    step: int = 1,
    attempts: int = 3,
) -> int | None:
    """Use Pollard's rho algorithm to return a nontrivial factor of ``num``, or None on failure."""
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'num',
        type=int,
        help='The value to find a divisor of',
    )
    parser.add_argument(
        '--attempts',
        type=int,
        default=3,
        help='The number of attempts before giving up',
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"""{args.num} is probably prime""")
    else:
        quotient = args.num // divisor
        print(f"""{args.num} = {divisor} * {quotient}""")
| 510 | 1 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
    },
    'merges_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'allenai/led-base-16384': 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file, merges_file, errors='replace', bos_token='<s>', eos_token='</s>',
                 sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>',
                 add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return len(self.encoder)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
__A : List[str] = tuple(_UpperCAmelCase)
__A : Optional[Any] = get_pairs(_UpperCAmelCase)
if not pairs:
return token
while True:
__A : Tuple = min(_UpperCAmelCase , key=lambda _UpperCAmelCase: self.bpe_ranks.get(_UpperCAmelCase , float('inf')))
if bigram not in self.bpe_ranks:
break
__A ,__A : Optional[int] = bigram
__A : str = []
__A : List[Any] = 0
while i < len(_UpperCAmelCase):
try:
__A : Tuple = word.index(_UpperCAmelCase , _UpperCAmelCase)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
__A : List[Any] = j
if word[i] == first and i < len(_UpperCAmelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
__A : List[Any] = tuple(_UpperCAmelCase)
__A : Any = new_word
if len(_UpperCAmelCase) == 1:
break
else:
__A : Union[str, Any] = get_pairs(_UpperCAmelCase)
__A : str = ' '.join(_UpperCAmelCase)
__A : Optional[Any] = word
return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8'))  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])

        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')

        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
    def _pad(
        self,
        encoded_inputs,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = 'attention_mask' in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['global_attention_mask']) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs['global_attention_mask'])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['global_attention_mask'] = (
                        encoded_inputs['global_attention_mask'] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
                        'global_attention_mask'
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side))

        return encoded_inputs
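# Standalone sketch (not part of the tokenizer above) of the greedy merge loop in
# `bpe`: the lowest-ranked adjacent pair is fused repeatedly until no ranked pair
# remains. The tiny merge table below is invented purely for illustration.
def toy_bpe(token, bpe_ranks):
    word = tuple(token)
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float('inf')))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
    return ' '.join(word)


# 'lower' starts as 'l o w e r' and collapses to 'lo w er' under these hypothetical ranks.
assert toy_bpe('lower', {('l', 'o'): 0, ('e', 'r'): 1}) == 'lo w er'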
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print('Building PyTorch model from configuration: {}'.format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print('Save PyTorch model to {}'.format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--rembert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained RemBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
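# Hypothetical invocation of the conversion script above (all paths are placeholders):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/rembert/model.ckpt-best \
#       --rembert_config_file /path/to/rembert/config.json \
#       --pytorch_dump_path /path/to/output/pytorch_model.bin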
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function='gelu',
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
        return XGLMConfig.from_pretrained('facebook/xglm-564M')

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=True, )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            'input_ids': input_ids,
            'head_mask': head_mask,
        }

        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False
    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.')
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        input_ids = tf.convert_to_tensor([[2, 268, 9_865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')

        tf.random.set_seed(0)
        tokenized = tokenizer('Today is a nice day and', return_tensors='tf')
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(':/CPU:0'):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M')

        tokenizer.padding_side = 'left'

        # use different length sentences to test batching
        sentences = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When',
            'Hello, my dog is a little',
        ]

        inputs = tokenizer(sentences, return_tensors='tf', padding=True)
        input_ids = inputs['input_ids']

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs['attention_mask'], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors='tf').input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors='tf').input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
            'a single',
            'Hello, my dog is a little bit of a shy one, but he is very friendly',
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
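# Minimal standalone sketch of why the batched-generation test above sets
# `tokenizer.padding_side` to 'left' for a decoder-only model: new tokens are
# appended on the right, so the real prompt must end the sequence. The helper
# and pad id are illustrative, not part of the XGLM test suite.
def left_pad(seqs, pad_id=1):
    width = max(len(s) for s in seqs)
    return [[pad_id] * (width - len(s)) + s for s in seqs]


assert left_pad([[5, 6, 7], [9]]) == [[5, 6, 7], [1, 1, 9]]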
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    def __init__(self):
        # test for the above condition
        self.test()
    def test(self):
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.')

            stepped, completed, reset = self.update(advance)
            counter += 1

            if counter > 10_000:
                raise Exception('update() does not fulfill the constraint.')

        if self.remaining() != 0:
            raise Exception('Custom Constraint is not defined correctly.')
    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')

    @abstractmethod
    def does_advance(self, token_id):
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')

    @abstractmethod
    def update(self, token_id):
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')
class PhrasalConstraint(Constraint):
    def __init__(self, token_ids):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f'`token_ids` has to be a non-empty list, but is {token_ids}.')
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f'Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.')

        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}')
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}')

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint
class DisjunctiveTrie:
    def __init__(self, nested_token_ids, no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
                f' {nested_token_ids}.')

        self.trie = root

    def next_tokens(self, current_seq):
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    def __init__(self, nested_token_ids):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f'`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.')
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f'`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.')
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids):
            raise ValueError(
                f'Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.')

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}')
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}')

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed

        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint
class ConstraintListState:
    def __init__(self, constraints):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids):
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f'`token_id` should be an `int`, but is `{token_id}`.')

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #     e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".

                #     But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #     constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None

            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #     inprogress to None. If there are no pending constraints either, then this full list of constraints
                #     is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None

                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True

        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)

                    if not stepped:
                        raise Exception(
                            '`constraint.update(token_id)` is not yielding incremental progress, '
                            'even though `constraint.does_advance(token_id)` is true.')

                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None

                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint

                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we never actually modify self.constraints objects
        # throughout this process, so it's at initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
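# Minimal usage sketch of the classes above, assuming this module mirrors
# transformers' generation.beam_constraints: `generate` accepts a `constraints=`
# list and drives a ConstraintListState inside constrained beam search, which
# requires num_beams > 1. The checkpoint and forced word are only examples and
# are downloaded on first use.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('t5-small')
model = AutoModelForSeq2SeqLM.from_pretrained('t5-small')

force_ids = tokenizer('Haus', add_special_tokens=False).input_ids
inputs = tokenizer('translate English to German: The house is wonderful.', return_tensors='pt')
out = model.generate(**inputs, constraints=[PhrasalConstraint(force_ids)], num_beams=5)
print(tokenizer.decode(out[0], skip_special_tokens=True))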
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_graphormer'] = [
        'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GraphormerForGraphClassification',
        'GraphormerModel',
        'GraphormerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
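# Standalone sketch of the same lazy-import idea via PEP 562 module-level
# __getattr__ (illustrative only; transformers implements this in _LazyModule):
import importlib

_lazy_map = {'sqrt': 'math', 'dataclass': 'dataclasses'}


def __getattr__(name):
    if name in _lazy_map:
        return getattr(importlib.import_module(_lazy_map[name]), name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')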
"""simple docstring"""
import warnings
from functools import wraps
from typing import Callable
def experimental(fn) -> Callable:
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."), UserWarning, )
        return fn(*args, **kwargs)

    return _inner_fn
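# Example use of the decorator above; `fancy_new_feature` is a hypothetical function.
@experimental
def fancy_new_feature(x):
    return x * 2


fancy_new_feature(3)  # warns: "'fancy_new_feature' is experimental and might be subject to breaking changes..."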
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = 'facebook/bart-large-mnli'
    description = (
        'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '
        'should be the text to classify, and `labels`, which should be the list of labels to use for classification. '
        'It returns the most likely label in the list of provided `labels` for the input text.'
    )
    name = 'text_classifier'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ['text', ['text']]
    outputs = ['text']

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith('entail'):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f'This example is {label}' for label in labels], return_tensors='pt', padding='max_length', )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
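# The tool above is the NLI trick behind zero-shot classification: each
# (text, 'This example is <label>') pair is scored with an MNLI model and the
# label with the highest entailment logit wins. The pipeline below is the
# ready-made equivalent (downloads the checkpoint on first use):
from transformers import pipeline

classifier = pipeline('zero-shot-classification', model='facebook/bart-large-mnli')
print(classifier('I love this movie', candidate_labels=['positive', 'negative']))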
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(""".""")

        if layer == "0":
            new_name = old_name.replace("""0""", """convolution1""")
        elif layer == "1":
            new_name = old_name.replace("""1""", """batchnorm_before""")
        elif layer == "3":
            new_name = old_name.replace("""3""", """convolution2""")
        else:
            new_name = old_name.replace("""4""", """batchnorm_after""")

    if "network" in old_name and re.search(r"""\d\.\d""", old_name):
        two_digit_num = r"""\b\d{2}\b"""
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"""\d\.\d\d.""", old_name).group()
        else:
            match = re.search(r"""\d\.\d.""", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, """""")
            trimmed_name = trimmed_name.replace("""network""", match[0] + """.meta4D_layers.blocks.""" + match[2:-1])
            new_name = """intermediate_stages.""" + trimmed_name
        else:
            trimmed_name = old_name.replace(match, """""")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("""network""", """meta4D_layers.blocks.""" + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("""network""", """meta3D_layers.blocks.""" + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("""norm1""", """layernorm1""")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("""norm2""", """layernorm2""")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("""fc1""", """linear_in""")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("""fc2""", """linear_out""")

            new_name = """last_stage.""" + trimmed_name

    elif "network" in old_name and re.search(r""".\d.""", old_name):
        new_name = old_name.replace("""network""", """intermediate_stages""")

    if "fc" in new_name:
        new_name = new_name.replace("""fc""", """convolution""")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("""norm1""", """batchnorm_before""")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("""norm2""", """batchnorm_after""")
    if "proj" in new_name:
        new_name = new_name.replace("""proj""", """projection""")
    if "dist_head" in new_name:
        new_name = new_name.replace("""dist_head""", """distillation_classifier""")
    elif "head" in new_name:
        new_name = new_name.replace("""head""", """classifier""")
    elif "patch_embed" in new_name:
        new_name = """efficientformer.""" + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("""norm""", """layernorm""")
        new_name = """efficientformer.""" + new_name
    else:
        new_name = """efficientformer.encoder.""" + new_name

    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location="""cpu""")["""model"""]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = """_""".join(checkpoint_path.split("""/""")[-1].split(""".""")[0].split("""_""")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        """bilinear""": PILImageResampling.BILINEAR,
        """bicubic""": PILImageResampling.BICUBIC,
        """nearest""": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 2_5_6
    crop_size = 2_2_4
    processor = EfficientFormerImageProcessor(
        size={"""shortest_edge""": image_size}, crop_size={"""height""": crop_size, """width""": crop_size}, resample=pillow_resamplings["""bicubic"""], )
    pixel_values = processor(images=image, return_tensors="""pt""").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["""bicubic"""]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ])
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1_0_0_0)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328])
        assert torch.allclose(logits[0, :1_0], expected_logits, atol=1E-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127])
        assert torch.allclose(logits[0, :1_0], expected_logits, atol=1E-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878])
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            F'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''')

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(F'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''')
    processor.save_pretrained(pytorch_dump_path)
    print(F'''Processor successfully saved at {pytorch_dump_path}''')

    if push_to_hub:
        print("""Pushing model to the hub...""")

        model.push_to_hub(
            repo_id=F'''Bearnardd/{pytorch_dump_path}''', commit_message="""Add model""", use_temp_dir=True, )
        processor.push_to_hub(
            repo_id=F'''Bearnardd/{pytorch_dump_path}''', commit_message="""Add image processor""", use_temp_dir=True, )
if __name__ == "__main__":
a : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
a : Optional[int] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
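# Hypothetical invocation of the conversion script above (file names are placeholders):
#   python convert_efficientformer_checkpoint.py \
#       --pytorch_model_path efficientformer_l1_300d.pth \
#       --config_file efficientformer_l1_config.json \
#       --pytorch_dump_path efficientformer-l1-300 \
#       --push_to_hub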
'''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys())})
    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'})
    max_seq_length: int = field(
        default=1_28, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})

    def __post_init__(self):
        self.task_name = self.task_name.lower()
class Split(Enum):
    train = 'train'
    dev = 'dev'
    test = 'test'
class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py", FutureWarning, )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir, f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}", )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start)
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples, tokenizer, max_length=args.max_seq_length, label_list=label_list, output_mode=self.output_mode, )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]")
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
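# Minimal sketch of wiring the dataset above into a tokenizer (the model name and
# data_dir are placeholders; the MRPC files must already exist on disk):
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
data_args = GlueDataTrainingArguments(task_name='mrpc', data_dir='/path/to/MRPC', max_seq_length=128)
eval_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode=Split.dev)
print(len(eval_dataset), eval_dataset.get_labels())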
'''simple docstring'''
import math
def jump_search(arr, x) -> int:
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    arr = [int(item) for item in user_input.split(''',''')]
    x = int(input('''Enter the number to be searched:\n'''))
    res = jump_search(arr, x)
if res == -1:
print('''Number not found!''')
else:
print(f'''Number {x} is at index {res}''')
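# Jump search needs a sorted array and makes O(sqrt(n)) comparisons: it hops in
# sqrt(n)-sized blocks, then scans linearly inside the block that may hold x.
assert jump_search([0, 1, 3, 5, 8, 13, 21], 8) == 4
assert jump_search([0, 1, 3, 5, 8, 13, 21], 2) == -1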
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
@slow
def UpperCAmelCase ( self ):
"""simple docstring"""
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(_SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : List[Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = FlaxAutoModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def UpperCAmelCase ( self ):
"""simple docstring"""
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(_SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : str = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : str = FlaxAutoModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def UpperCAmelCase ( self ):
"""simple docstring"""
for model_name in ["bert-base-cased", "bert-large-uncased"]:
SCREAMING_SNAKE_CASE_ : List[str] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : int = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**_SCREAMING_SNAKE_CASE ):
return model(**_SCREAMING_SNAKE_CASE )
eval(**_SCREAMING_SNAKE_CASE ).block_until_ready()
@slow
def UpperCAmelCase ( self ):
"""simple docstring"""
for model_name in ["roberta-base", "roberta-large"]:
SCREAMING_SNAKE_CASE_ : int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Any = FlaxRobertaModel.from_pretrained(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**_SCREAMING_SNAKE_CASE ):
return model(**_SCREAMING_SNAKE_CASE )
eval(**_SCREAMING_SNAKE_CASE ).block_until_ready()
def UpperCAmelCase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE , 'bert-base is not a local folder and is not a valid model identifier' ):
SCREAMING_SNAKE_CASE_ : Any = FlaxAutoModel.from_pretrained('bert-base' )
def UpperCAmelCase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
SCREAMING_SNAKE_CASE_ : List[Any] = FlaxAutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , revision='aaaaaa' )
def UpperCAmelCase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE , 'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack' , ):
SCREAMING_SNAKE_CASE_ : Any = FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCAmelCase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(_SCREAMING_SNAKE_CASE , 'Use `from_pt=True` to load this model' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
def different_signs(num1: int, num2: int) -> bool:
    # The XOR of the operands is negative exactly when their sign bits differ.
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
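# Quick checks of the branch-free opposite-sign test above:
assert different_signs(-4, 7) is True
assert different_signs(3, 9) is False
assert different_signs(-2, -8) is False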
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--onnx_model_path",
default=None,
type=str,
required=True,
help="Path to ONNX model: ",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
required=True,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=384,
type=int,
help=(
"The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded."
),
)
parser.add_argument(
"--doc_stride",
default=128,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
"--n_best_size",
default=20,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=30,
type=int,
help=(
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
required=True,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
"--int8",
action="store_true",
help="Whether to use INT8",
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported by this script."
        "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    )

logger.info("Training/evaluation parameters %s", args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists("temp_engine"):
os.makedirs("temp_engine")
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
    network, TRT_LOGGER
) as parser:
    with open(args.onnx_model_path, "rb") as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))

    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)

        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, "wb") as f:
            f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs['input_ids'], dtype=np.int32)
    attention_mask = np.asarray(inputs['attention_mask'], dtype=np.int32)
    token_type_ids = np.asarray(inputs['token_type_ids'], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle)
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
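# Sketch of the buffer setup `model_infer` expects (binding indices and shapes are
# illustrative; the full script derives them from the deserialized engine):
#   d_inputs  = [cuda.mem_alloc(input_nbytes) for _ in range(3)]
#   h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
#   h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
#   d_output0 = cuda.mem_alloc(h_output0.nbytes)
#   d_output1 = cuda.mem_alloc(h_output1.nbytes)
#   stream    = cuda.Stream()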
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.

column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the "
        f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )

max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation="only_second" if pad_on_right else "only_first", max_length=max_seq_length, stride=args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding="max_length", )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
__A : List[str] = raw_datasets["validation"]
# Validation Feature Creation
__A : List[str] = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
__A : str = default_data_collator
__A : List[Any] = eval_dataset.remove_columns(["example_id", "offset_mapping"])
__A : int = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    """Convert raw start/end logits into answer strings in the format the SQuAD metric expects."""
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        """Return the number of bytes needed to hold one engine binding."""
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
# Allocate device memory for inputs and outputs.
    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate page-locked host buffers for the two outputs (start and end logits);
    # float32 is assumed here for the logit dtype.
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
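    # Note (assumption): this layout presumes the engine was built with exactly three
    # inputs (input_ids, attention_mask, token_type_ids) bound at indices 0-2 and two
    # outputs (start and end logits) at indices 3-4, matching model_infer() above.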
# Evaluation
logger.info("***** Running Evaluation *****")
logger.info(f" Num examples = {len(eval_dataset)}")
logger.info(f" Batch size = {args.per_device_eval_batch_size}")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1_000 / niter))
logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1_000))
logger.info("Total Number of Inference = %d", niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"Evaluation metrics: {eval_metric}")
| 27 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention_forwardGenerator_pass = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0  # region to inpaint (exact slice is an assumption; the source elides it)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0  # region to inpaint (exact slice is an assumption; the source elides it)
        prompt = "a hat"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt=""
        ).to_tuple()
        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image) | 505 | 0 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments | 308 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"""The `inpainting.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionInpaintPipeline` instead."""
) | 308 | 1 |
cache: dict[tuple[int, int, int], int] = {}
def _calculate(days: int, absent: int, late: int) -> int:
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings
def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
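    # Sanity check (a sketch): the Project Euler 191 statement says exactly 43 of the
    # 81 possible 4-day trinary strings lead to a prize, which the memoized count
    # above should reproduce.
    assert solution(4) == 43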
| 17 |
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal (fractional) part of a number, optionally rounded to
    ``digit_amount`` digits; the sign of the input is preserved."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
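    # For example, decimal_isolate(35.345, 1) keeps only the fractional part and
    # rounds it to one digit (~0.3); with digit_amount=0 the raw remainder is
    # returned, subject to the usual floating-point representation error.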
| 279 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]
        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass
    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 490 |
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution(max_n: int = 100) -> int:
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 490 | 1 |
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def bamb(x):
    """Convert a number of bytes to whole megabytes (MiB); e.g. bamb(3 * 2**20) == 3."""
    return int(x / 2**20)
class TorchTracemalloc:
    """Context manager that tracks CUDA memory allocated inside its block."""

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(
    accelerator: Accelerator,
    batch_size: int = 16,
    model_name: str = "bert-base-cased",
    n_train: int = 320,
    n_val: int = 160,
):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument("--n_train", type=int, default=320, help="Number of training examples to use.")
    parser.add_argument("--n_val", type=int, default=160, help="Number of validation examples to use.")
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of train epochs.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 594 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)
                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)
                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)
                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))
        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)
        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"
        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])
        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)
        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 594 | 1 |
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield


@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield


@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model which is supposed to download a comet model
    # mock load_from_checkpoint which is supposed to load the downloaded model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield


def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
| 703 |
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_UpperCAmelCase = ["""text""", """image""", """audio"""]
def create_inputs(input_types: List[str]):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")
    return inputs


def output_types(outputs: List):
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")
    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)
        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)
        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
        # Should not raise an error
        outputs = self.tool(*_inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
| 36 | 0 |
'''simple docstring'''
import requests

giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of URLs of GIFs based on a given query."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print("\n".join(get_gifs("space ship")))
| 11 |
'''simple docstring'''
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
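# A note on the algorithm above: RULE 4 pops the most recent operand into num1
# and the one beneath it into num2, so non-commutative operators evaluate in
# the intended order, and since operands are pushed one character at a time,
# only single-digit numbers are supported. A small hand-checked example:
assert dijkstras_two_stack_algorithm("(9 - (2 + 3))") == 4  # 9 - 5, not 5 - 9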
| 596 | 0 |
def solution(min_total: int = 10**12) -> int:
    prev_numerator = 1
    prev_denominator = 0

    numerator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2


if __name__ == "__main__":
    print(f"{solution() = }")
| 705 |
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}


class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
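# A quick illustration of the two helpers above (a sketch; the ids 7592 and
# 2088 are hypothetical placeholders, and real values depend on the vocab):
#   tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#   a, b = [7592], [2088]
#   tok.build_inputs_with_special_tokens(a, b)
#     -> [tok.cls_token_id, 7592, tok.sep_token_id, 2088, tok.sep_token_id]  # [CLS] a [SEP] b [SEP]
#   tok.create_token_type_ids_from_sequences(a, b)
#     -> [0, 0, 0, 1, 1]  # zeros over "[CLS] a [SEP]", ones over "b [SEP]"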
| 103 | 0 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2", revision="bf16", dtype=jnp.bfloat16, )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, revision="bf16", dtype=jnp.bfloat16, )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
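# The replicate/shard pair above is the standard flax pmap data-parallel setup:
# replicate copies the params pytree onto every local device, and shard splits
# a batch's leading axis into (num_devices, batch_per_device, ...). A minimal
# shape-only sketch, independent of the diffusion pipeline:
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

batch = jnp.zeros((jax.device_count() * 2, 77))  # leading axis divisible by device count
assert shard(batch).shape == (jax.device_count(), 2, 77)
assert replicate({"w": jnp.ones(3)})["w"].shape == (jax.device_count(), 3)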
| 208 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 208 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=3, )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 593 |
'''simple docstring'''
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
a : Optional[int] = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset, batch_size=self.args.eval_batch_size, collate_fn=self.data_collator, drop_last=self.args.dataloader_drop_last, num_workers=self.args.dataloader_num_workers, pin_memory=self.args.dataloader_pin_memory, shuffle=True, )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader, description="Evaluation", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader, description="Prediction", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)

        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save, input_tuple, output_model_file, export_params=True, opset_version=13, do_constant_folding=True, input_names=["input_ids", "attention_mask", "token_type_ids"], output_names=["output_start_logits", "output_end_logits"], dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            }, verbose=True, )
        logger.info("onnx export finished")
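# In the export above, each dynamic_axes entry marks which axes of a named
# tensor may vary at runtime: axis 0 ("batch_size") and axis 1 ("seq_len") are
# left symbolic, so the exported graph accepts any batch size and sequence
# length. The same call shape on a toy model (illustrative names only):
import torch

toy = torch.nn.Linear(4, 2)
torch.onnx.export(
    toy, (torch.zeros(1, 4),), "toy.onnx", input_names=["x"], output_names=["y"], dynamic_axes={"x": {0: "batch_size"}, "y": {0: "batch_size"}}, )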
| 593 | 1 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = """\
@inproceedings{wang2019glue,
  title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
  author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
  note={In the Proceedings of ICLR.},
  year={2019}
}
"""

_DESCRIPTION = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""

_KWARGS_DESCRIPTION = """
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
    predictions: list of predictions to score.
        Each translation should be tokenized into a list of tokens.
    references: list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
    "accuracy": Accuracy
    "f1": F1 score
    "pearson": Pearson Correlation
    "spearmanr": Spearman Correlation
    "matthews_correlation": Matthew Correlation
Examples:

    >>> glue_metric = datasets.load_metric('glue', 'sst2')  # 'sst2' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> glue_metric = datasets.load_metric('glue', 'mrpc')  # 'mrpc' or 'qqp'
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> glue_metric = datasets.load_metric('glue', 'stsb')
    >>> references = [0., 1., 2., 3., 4., 5.]
    >>> predictions = [0., 1., 2., 3., 4., 5.]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
    {'pearson': 1.0, 'spearmanr': 1.0}

    >>> glue_metric = datasets.load_metric('glue', 'cola')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Glue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                } ), codebase_urls=[], reference_urls=[], format="numpy", )

    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
| 149 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
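# The block above is the lazy-import pattern: the module swaps itself in
# sys.modules for a _LazyModule that resolves names from _import_structure on
# first access, so heavy framework imports are deferred. A minimal sketch of
# the same idea with a PEP 562 module-level __getattr__ (illustrative, not the
# actual _LazyModule implementation):
import importlib

def __getattr__(name):
    for module_name, exported in _import_structure.items():
        if name in exported:
            return getattr(importlib.import_module(f".{module_name}", __name__), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")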
| 666 | 0 |
import inspect
import unittest
from transformers import ConvNextV2Config
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model
    from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextV2ModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextV2Config(
            num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict


@require_torch
class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextV2Model,
            ConvNextV2ForImageClassification,
            ConvNextV2Backbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextV2Config, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 586 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, spectrogram_length=2048, feature_size=128, num_audio_channels=1, hop_length=512, chunk_length=30, sampling_rate=44100, ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEquals(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
| 586 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_roberta_prelayernorm': [
'ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP',
'RobertaPreLayerNormConfig',
'RobertaPreLayerNormOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
'ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaPreLayerNormForCausalLM',
'RobertaPreLayerNormForMaskedLM',
'RobertaPreLayerNormForMultipleChoice',
'RobertaPreLayerNormForQuestionAnswering',
'RobertaPreLayerNormForSequenceClassification',
'RobertaPreLayerNormForTokenClassification',
'RobertaPreLayerNormModel',
'RobertaPreLayerNormPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
'TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaPreLayerNormForCausalLM',
'TFRobertaPreLayerNormForMaskedLM',
'TFRobertaPreLayerNormForMultipleChoice',
'TFRobertaPreLayerNormForQuestionAnswering',
'TFRobertaPreLayerNormForSequenceClassification',
'TFRobertaPreLayerNormForTokenClassification',
'TFRobertaPreLayerNormMainLayer',
'TFRobertaPreLayerNormModel',
'TFRobertaPreLayerNormPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
'FlaxRobertaPreLayerNormForCausalLM',
'FlaxRobertaPreLayerNormForMaskedLM',
'FlaxRobertaPreLayerNormForMultipleChoice',
'FlaxRobertaPreLayerNormForQuestionAnswering',
'FlaxRobertaPreLayerNormForSequenceClassification',
'FlaxRobertaPreLayerNormForTokenClassification',
'FlaxRobertaPreLayerNormModel',
'FlaxRobertaPreLayerNormPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 417 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
def __init__( self : List[str] , snake_case : int , snake_case : Optional[int]=1_3 , snake_case : List[str]=3_0 , snake_case : Optional[Any]=2 , snake_case : Union[str, Any]=3 , snake_case : List[Any]=True , snake_case : Union[str, Any]=True , snake_case : List[Any]=3_2 , snake_case : int=5 , snake_case : int=4 , snake_case : List[str]=3_7 , snake_case : Union[str, Any]="gelu" , snake_case : int=0.1 , snake_case : Dict=0.1 , snake_case : Any=1_0 , snake_case : Any=0.02 , snake_case : int=3 , snake_case : int=0.6 , snake_case : str=None , ) -> Any:
"""simple docstring"""
UpperCamelCase_ : List[Any] = parent
UpperCamelCase_ : Optional[int] = batch_size
UpperCamelCase_ : Optional[Any] = image_size
UpperCamelCase_ : Optional[int] = patch_size
UpperCamelCase_ : List[str] = num_channels
UpperCamelCase_ : Optional[int] = is_training
UpperCamelCase_ : Tuple = use_labels
UpperCamelCase_ : str = hidden_size
UpperCamelCase_ : Union[str, Any] = num_hidden_layers
UpperCamelCase_ : int = num_attention_heads
UpperCamelCase_ : Optional[Any] = intermediate_size
UpperCamelCase_ : Optional[int] = hidden_act
UpperCamelCase_ : int = hidden_dropout_prob
UpperCamelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCamelCase_ : List[str] = type_sequence_label_size
UpperCamelCase_ : List[str] = initializer_range
UpperCamelCase_ : Union[str, Any] = mask_ratio
UpperCamelCase_ : List[Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCamelCase_ : int = (image_size // patch_size) ** 2
UpperCamelCase_ : Any = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ : Tuple = None
if self.use_labels:
UpperCamelCase_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_ : List[Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def SCREAMING_SNAKE_CASE__ ( self : str , snake_case : Dict , snake_case : Optional[int] , snake_case : str ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = ViTMAEModel(config=snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase_ : Any = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : str , snake_case : List[Any] , snake_case : Tuple , snake_case : Tuple ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : List[str] = ViTMAEForPreTraining(snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase_ : Optional[int] = model(snake_case )
UpperCamelCase_ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCamelCase_ : str = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCamelCase_ : List[str] = 1
UpperCamelCase_ : Tuple = ViTMAEForPreTraining(snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase_ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase_ : Optional[Any] = model(snake_case )
UpperCamelCase_ : Optional[Any] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : List[str] = self.prepare_config_and_inputs()
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_ : Union[str, Any] = config_and_inputs
UpperCamelCase_ : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( snake_case_ , snake_case_ , unittest.TestCase ):
lowercase = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
lowercase = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : Tuple = ViTMAEModelTester(self )
UpperCamelCase_ : List[str] = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> int:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : List[str] = model_class(snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase_ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : Optional[Any] = model_class(snake_case )
UpperCamelCase_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ : Optional[int] = [*signature.parameters.keys()]
UpperCamelCase_ : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> int:
"""simple docstring"""
UpperCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case : List[str] , snake_case : Union[str, Any] , snake_case : Tuple ) -> int:
"""simple docstring"""
np.random.seed(2 )
UpperCamelCase_ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCamelCase_ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCamelCase_ : Optional[Any] = torch.from_numpy(snake_case )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase_ : Optional[int] = pt_noise
super().check_pt_tf_models(snake_case , snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : str = model_class(snake_case )
model.to(snake_case )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCamelCase_ : Union[str, Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
out_1 = outputs[0].cpu().numpy()
out_1[np.isnan(out_1 )] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case )
UpperCamelCase_ : Any = model_class.from_pretrained(snake_case )
model.to(snake_case )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCamelCase_ : Optional[Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
# Make sure we don't have nans
out_2 = after_outputs[0].cpu().numpy()
out_2[np.isnan(out_2 )] = 0
max_diff = np.amax(np.abs(out_1 - out_2 ) )
self.assertLessEqual(max_diff , 1e-5 )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
pass
@slow
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ : Tuple = ViTMAEModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def __lowercase ( ):
UpperCamelCase_ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(2 )
UpperCamelCase_ : Dict = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ).to(snake_case )
UpperCamelCase_ : str = self.default_image_processor
UpperCamelCase_ : int = prepare_img()
UpperCamelCase_ : Optional[int] = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase_ : Optional[Any] = ViTMAEConfig()
UpperCamelCase_ : List[str] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCamelCase_ : Any = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCamelCase_ : Tuple = model(**snake_case , noise=torch.from_numpy(snake_case ).to(device=snake_case ) )
# verify the logits
UpperCamelCase_ : Optional[int] = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , snake_case )
UpperCamelCase_ : List[Any] = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(snake_case ) , atol=1e-4 ) )
| 417 | 1 |
"""simple docstring"""
import argparse
import datetime
def snake_case ( UpperCamelCase__ : str ) -> str:
lowerCamelCase : Any = {
"""0""": """Sunday""",
"""1""": """Monday""",
"""2""": """Tuesday""",
"""3""": """Wednesday""",
"""4""": """Thursday""",
"""5""": """Friday""",
"""6""": """Saturday""",
}
lowerCamelCase : Any = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
if not 0 < len(UpperCamelCase__ ) < 11:
raise ValueError("""Must be 10 characters long""" )
# Get month
lowerCamelCase : int = int(date_input[0] + date_input[1] )
# Validate
if not 0 < m < 13:
raise ValueError("""Month must be between 1 - 12""" )
lowerCamelCase : str = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("""Date separator must be '-' or '/'""" )
# Get day
lowerCamelCase : int = int(date_input[3] + date_input[4] )
# Validate
if not 0 < d < 32:
raise ValueError("""Date must be between 1 - 31""" )
# Get second separator
lowerCamelCase : str = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("""Date separator must be '-' or '/'""" )
# Get year
lowerCamelCase : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
# Arbitrary year range
if not 45 < y < 8500:
raise ValueError(
"""Year out of range. There has to be some sort of limit...right?""" )
# Get datetime obj for validation
lowerCamelCase : List[Any] = datetime.date(int(y ) , int(m ) , int(d ) )
# Start math
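# Zeller's congruence: January and February are treated as months 13 and 14 of the previous year, then the weekday index is built from day, month term, year-of-century and century corrections modulo 7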
if m <= 2:
lowerCamelCase : Optional[int] = y - 1
lowerCamelCase : int = m + 12
# maths var
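# c is the century part of the year (first two digits), k is the year within the century (last two digits)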
lowerCamelCase : int = int(str(y )[:2] )
lowerCamelCase : int = int(str(y )[2:] )
lowerCamelCase : int = int(2.6 * m - 5.39 )
lowerCamelCase : int = int(c / 4 )
lowerCamelCase : int = int(k / 4 )
lowerCamelCase : int = int(d + k )
lowerCamelCase : int = int(t + u + v + x )
lowerCamelCase : int = int(z - (2 * c) )
lowerCamelCase : int = round(w % 7 )
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError("""The date was evaluated incorrectly. Contact developer.""" )
# Response
lowerCamelCase : str = F'Your date {date_input}, is a {days[str(f )]}!'
return response
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCamelCase :List[str] = argparse.ArgumentParser(
description=(
'Find out what day of the week nearly any date is or was. Enter '
'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
)
)
parser.add_argument(
'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
)
__lowerCamelCase :Dict = parser.parse_args()
zeller(args.date_input)
| 42 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase :Dict = logging.get_logger()
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : LevitConfig , UpperCamelCase__ : Path , UpperCamelCase__ : bool = True ) -> Dict:
print(F'Converting {name}...' )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
lowerCamelCase : Optional[Any] = timm.create_model("""levit_128s""" , pretrained=UpperCamelCase__ )
else:
lowerCamelCase : Dict = timm.create_model("""levit_128""" , pretrained=UpperCamelCase__ )
if hidden_sizes == 192:
lowerCamelCase : Tuple = timm.create_model("""levit_192""" , pretrained=UpperCamelCase__ )
if hidden_sizes == 256:
lowerCamelCase : Optional[int] = timm.create_model("""levit_256""" , pretrained=UpperCamelCase__ )
if hidden_sizes == 384:
lowerCamelCase : Dict = timm.create_model("""levit_384""" , pretrained=UpperCamelCase__ )
from_model.eval()
lowerCamelCase : Optional[Any] = LevitForImageClassificationWithTeacher(UpperCamelCase__ ).eval()
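# copy weights positionally: the timm and HF state dicts are assumed to enumerate parameters in the same order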
huggingface_weights = OrderedDict()
weights = from_model.state_dict()
og_keys = list(from_model.state_dict().keys() )
hf_keys = list(our_model.state_dict().keys() )
print(len(og_keys ) , len(hf_keys ) )
for i in range(len(og_keys ) ):
huggingface_weights[hf_keys[i]] = weights[og_keys[i]]
our_model.load_state_dict(huggingface_weights )
x = torch.randn((2, 3, 224, 224) )
from_model_logits = from_model(x )
our_model_logits = our_model(x ).logits
assert torch.allclose(from_model_logits , our_model_logits ), "The model logits don't match the original one."
lowerCamelCase : Dict = name
print(UpperCamelCase__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
lowerCamelCase : Optional[int] = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'Pushed {checkpoint_name}' )
def snake_case ( UpperCamelCase__ : Path , UpperCamelCase__ : str = None , UpperCamelCase__ : bool = True ) -> Optional[int]:
lowerCamelCase : Optional[Any] = """imagenet-1k-id2label.json"""
lowerCamelCase : List[Any] = 1000
lowerCamelCase : Dict = (1, num_labels)
lowerCamelCase : List[Any] = """huggingface/label-files"""
lowerCamelCase : Optional[int] = num_labels
lowerCamelCase : List[str] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
lowerCamelCase : Any = {int(k ): v for k, v in idalabel.items()}
lowerCamelCase : List[Any] = idalabel
lowerCamelCase : str = {v: k for k, v in idalabel.items()}
lowerCamelCase : Tuple = partial(UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ )
lowerCamelCase : Optional[int] = {
"""levit-128S""": 128,
"""levit-128""": 128,
"""levit-192""": 192,
"""levit-256""": 256,
"""levit-384""": 384,
}
lowerCamelCase : List[Any] = {
"""levit-128S""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-128""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-192""": ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-256""": ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-384""": ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , UpperCamelCase__ , names_to_config[model_name] , UpperCamelCase__ , UpperCamelCase__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return config, expected_shape
if __name__ == "__main__":
__lowerCamelCase :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
__lowerCamelCase :List[Any] = parser.parse_args()
__lowerCamelCase :Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 42 | 1 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ : Tuple = logging.get_logger(__name__)
set_seed(770)
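# mapping from the GPT-style parameter names in the original Bark checkpoints to the names used by the HF implementation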
UpperCamelCase__ : Union[str, Any] = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
UpperCamelCase__ : Optional[int] = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
UpperCamelCase__ : str = os.path.dirname(os.path.abspath(__file__))
UpperCamelCase__ : Union[str, Any] = os.path.join(os.path.expanduser("~"), ".cache")
UpperCamelCase__ : str = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = model_type
if use_small:
key += "_small"
return os.path.join(SCREAMING_SNAKE_CASE_ , REMOTE_MODEL_PATHS[key]["""file_name"""] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
"""simple docstring"""
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
hf_hub_download(repo_id=SCREAMING_SNAKE_CASE_ , filename=SCREAMING_SNAKE_CASE_ , local_dir=SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_="text" ) -> Union[str, Any]:
"""simple docstring"""
if model_type == "text":
_SCREAMING_SNAKE_CASE = BarkSemanticModel
_SCREAMING_SNAKE_CASE = BarkSemanticConfig
_SCREAMING_SNAKE_CASE = BarkSemanticGenerationConfig
elif model_type == "coarse":
_SCREAMING_SNAKE_CASE = BarkCoarseModel
_SCREAMING_SNAKE_CASE = BarkCoarseConfig
_SCREAMING_SNAKE_CASE = BarkCoarseGenerationConfig
elif model_type == "fine":
_SCREAMING_SNAKE_CASE = BarkFineModel
_SCREAMING_SNAKE_CASE = BarkFineConfig
_SCREAMING_SNAKE_CASE = BarkFineGenerationConfig
else:
raise NotImplementedError()
_SCREAMING_SNAKE_CASE = F"{model_type}_small" if use_small else model_type
_SCREAMING_SNAKE_CASE = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(SCREAMING_SNAKE_CASE_ ):
logger.info(F"{model_type} model not found, downloading into `{CACHE_DIR}`." )
_download(model_info["""repo_id"""] , model_info["""file_name"""] )
_SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location=SCREAMING_SNAKE_CASE_ )
# this is a hack
_SCREAMING_SNAKE_CASE = checkpoint["""model_args"""]
if "input_vocab_size" not in model_args:
_SCREAMING_SNAKE_CASE = model_args["""vocab_size"""]
_SCREAMING_SNAKE_CASE = model_args["""vocab_size"""]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
_SCREAMING_SNAKE_CASE = model_args.pop("""n_head""" )
_SCREAMING_SNAKE_CASE = model_args.pop("""n_embd""" )
_SCREAMING_SNAKE_CASE = model_args.pop("""n_layer""" )
_SCREAMING_SNAKE_CASE = ConfigClass(**checkpoint["""model_args"""] )
_SCREAMING_SNAKE_CASE = ModelClass(config=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = GenerationConfigClass()
_SCREAMING_SNAKE_CASE = model_generation_config
_SCREAMING_SNAKE_CASE = checkpoint["""model"""]
# fixup checkpoint
_SCREAMING_SNAKE_CASE = """_orig_mod."""
for k, v in list(state_dict.items() ):
if k.startswith(SCREAMING_SNAKE_CASE_ ):
# replace part of the key with corresponding layer name in HF implementation
_SCREAMING_SNAKE_CASE = k[len(SCREAMING_SNAKE_CASE_ ) :]
for old_layer_name in new_layer_name_dict:
_SCREAMING_SNAKE_CASE = new_k.replace(SCREAMING_SNAKE_CASE_ , new_layer_name_dict[old_layer_name] )
_SCREAMING_SNAKE_CASE = state_dict.pop(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = set(state_dict.keys() ) - set(model.state_dict().keys() )
_SCREAMING_SNAKE_CASE = {k for k in extra_keys if not k.endswith(""".attn.bias""" )}
_SCREAMING_SNAKE_CASE = set(model.state_dict().keys() ) - set(state_dict.keys() )
_SCREAMING_SNAKE_CASE = {k for k in missing_keys if not k.endswith(""".attn.bias""" )}
if len(SCREAMING_SNAKE_CASE_ ) != 0:
raise ValueError(F"extra keys found: {extra_keys}" )
if len(SCREAMING_SNAKE_CASE_ ) != 0:
raise ValueError(F"missing keys: {missing_keys}" )
model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = model.num_parameters(exclude_embeddings=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = checkpoint["""best_val_loss"""].item()
logger.info(F"model loaded: {round(n_params/1e6 , 1 )}M params, {round(SCREAMING_SNAKE_CASE_ , 3 )} loss" )
model.eval()
model.to(SCREAMING_SNAKE_CASE_ )
del checkpoint, state_dict
return model
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_="text" ) -> int:
"""simple docstring"""
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
_SCREAMING_SNAKE_CASE = """cpu""" # do conversion on cpu
_SCREAMING_SNAKE_CASE = _get_ckpt_path(SCREAMING_SNAKE_CASE_ , use_small=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = _load_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , model_type=SCREAMING_SNAKE_CASE_ , use_small=SCREAMING_SNAKE_CASE_ )
# load bark initial model
_SCREAMING_SNAKE_CASE = _bark_load_model(SCREAMING_SNAKE_CASE_ , """cpu""" , model_type=SCREAMING_SNAKE_CASE_ , use_small=SCREAMING_SNAKE_CASE_ )
if model_type == "text":
_SCREAMING_SNAKE_CASE = bark_model["""model"""]
if model.num_parameters(exclude_embeddings=SCREAMING_SNAKE_CASE_ ) != bark_model.get_num_params():
raise ValueError("""initial and new models don't have the same number of parameters""" )
# check if same output as the bark model
_SCREAMING_SNAKE_CASE = 5
_SCREAMING_SNAKE_CASE = 10
if model_type in ["text", "coarse"]:
_SCREAMING_SNAKE_CASE = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
_SCREAMING_SNAKE_CASE = bark_model(SCREAMING_SNAKE_CASE_ )[0]
_SCREAMING_SNAKE_CASE = model(SCREAMING_SNAKE_CASE_ )
# take last logits
_SCREAMING_SNAKE_CASE = output_new_model_total.logits[:, [-1], :]
else:
_SCREAMING_SNAKE_CASE = 3
_SCREAMING_SNAKE_CASE = 8
_SCREAMING_SNAKE_CASE = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
_SCREAMING_SNAKE_CASE = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = bark_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("""initial and new outputs don't have the same shape""" )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError("""initial and new outputs are not equal""" )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = BarkSemanticConfig.from_pretrained(os.path.join(SCREAMING_SNAKE_CASE_ , """config.json""" ) )
_SCREAMING_SNAKE_CASE = BarkCoarseConfig.from_pretrained(os.path.join(SCREAMING_SNAKE_CASE_ , """config.json""" ) )
_SCREAMING_SNAKE_CASE = BarkFineConfig.from_pretrained(os.path.join(SCREAMING_SNAKE_CASE_ , """config.json""" ) )
_SCREAMING_SNAKE_CASE = EncodecConfig.from_pretrained("""facebook/encodec_24khz""" )
_SCREAMING_SNAKE_CASE = BarkSemanticModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = BarkCoarseModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = BarkFineModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = EncodecModel.from_pretrained("""facebook/encodec_24khz""" )
_SCREAMING_SNAKE_CASE = BarkConfig.from_sub_model_configs(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
_SCREAMING_SNAKE_CASE = BarkModel(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = semantic
_SCREAMING_SNAKE_CASE = coarseAcoustic
_SCREAMING_SNAKE_CASE = fineAcoustic
_SCREAMING_SNAKE_CASE = codec
_SCREAMING_SNAKE_CASE = bark_generation_config
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
bark.save_pretrained(SCREAMING_SNAKE_CASE_ , repo_id=SCREAMING_SNAKE_CASE_ , push_to_hub=SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
UpperCamelCase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument("model_type", type=str, help="text, coarse or fine.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
UpperCamelCase__ : List[str] = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 591 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCamelCase__ : Optional[Any] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class _a :
"""simple docstring"""
SCREAMING_SNAKE_CASE = field(
default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'})
SCREAMING_SNAKE_CASE = field(
default=_lowerCamelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
SCREAMING_SNAKE_CASE = field(
default=_lowerCamelCase , metadata={'help': 'The column name of the images in the files.'})
SCREAMING_SNAKE_CASE = field(default=_lowerCamelCase , metadata={'help': 'A folder containing the training data.'})
SCREAMING_SNAKE_CASE = field(default=_lowerCamelCase , metadata={'help': 'A folder containing the validation data.'})
SCREAMING_SNAKE_CASE = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'})
SCREAMING_SNAKE_CASE = field(
default=_lowerCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=_lowerCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = {}
if self.train_dir is not None:
_SCREAMING_SNAKE_CASE = self.train_dir
if self.validation_dir is not None:
_SCREAMING_SNAKE_CASE = self.validation_dir
_SCREAMING_SNAKE_CASE = data_files if data_files else None
@dataclass
class _a :
"""simple docstring"""
SCREAMING_SNAKE_CASE = field(
default=_lowerCamelCase , metadata={
'help': (
'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=_lowerCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'})
SCREAMING_SNAKE_CASE = field(
default=_lowerCamelCase , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=_lowerCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'})
SCREAMING_SNAKE_CASE = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
SCREAMING_SNAKE_CASE = field(default=_lowerCamelCase , metadata={'help': 'Name or path of preprocessor config.'})
SCREAMING_SNAKE_CASE = field(
default=_lowerCamelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=0.75 , metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'})
SCREAMING_SNAKE_CASE = field(
default=_lowerCamelCase , metadata={'help': 'Whether or not to train with normalized pixel values as target.'})
@dataclass
class _a (_lowerCamelCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = field(
default=1E-3 , metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'})
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = torch.stack([example["""pixel_values"""] for example in examples] )
return {"pixel_values": pixel_values}
def lowerCAmelCase_ ( ) -> List[str]:
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mae""" , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE_ )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
_SCREAMING_SNAKE_CASE = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
_SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
_SCREAMING_SNAKE_CASE = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , SCREAMING_SNAKE_CASE_ ) and data_args.train_val_split > 0.0:
_SCREAMING_SNAKE_CASE = ds["""train"""].train_test_split(data_args.train_val_split )
_SCREAMING_SNAKE_CASE = split["""train"""]
_SCREAMING_SNAKE_CASE = split["""test"""]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_SCREAMING_SNAKE_CASE = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
_SCREAMING_SNAKE_CASE = ViTMAEConfig.from_pretrained(model_args.config_name , **SCREAMING_SNAKE_CASE_ )
elif model_args.model_name_or_path:
_SCREAMING_SNAKE_CASE = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE_ )
else:
_SCREAMING_SNAKE_CASE = ViTMAEConfig()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(F"New config: {config}" )
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
_SCREAMING_SNAKE_CASE = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **SCREAMING_SNAKE_CASE_ )
elif model_args.model_name_or_path:
_SCREAMING_SNAKE_CASE = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE_ )
else:
_SCREAMING_SNAKE_CASE = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
_SCREAMING_SNAKE_CASE = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
_SCREAMING_SNAKE_CASE = ViTMAEForPreTraining(SCREAMING_SNAKE_CASE_ )
if training_args.do_train:
_SCREAMING_SNAKE_CASE = ds["""train"""].column_names
else:
_SCREAMING_SNAKE_CASE = ds["""validation"""].column_names
if data_args.image_column_name is not None:
_SCREAMING_SNAKE_CASE = data_args.image_column_name
elif "image" in column_names:
_SCREAMING_SNAKE_CASE = """image"""
elif "img" in column_names:
_SCREAMING_SNAKE_CASE = """img"""
else:
_SCREAMING_SNAKE_CASE = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
_SCREAMING_SNAKE_CASE = image_processor.size["""shortest_edge"""]
else:
_SCREAMING_SNAKE_CASE = (image_processor.size["""height"""], image_processor.size["""width"""])
_SCREAMING_SNAKE_CASE = Compose(
[
Lambda(lambda img : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(SCREAMING_SNAKE_CASE_ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(examples ):
examples["""pixel_values"""] = [transforms(image ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
_SCREAMING_SNAKE_CASE = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(SCREAMING_SNAKE_CASE_ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
_SCREAMING_SNAKE_CASE = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(SCREAMING_SNAKE_CASE_ )
# Compute absolute learning rate
_SCREAMING_SNAKE_CASE = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
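# linear scaling rule: absolute_lr = base_lr * total_batch_size / 256 (see the help text of base_learning_rate above)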
if training_args.base_learning_rate is not None:
_SCREAMING_SNAKE_CASE = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
_SCREAMING_SNAKE_CASE = Trainer(
model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=SCREAMING_SNAKE_CASE_ , data_collator=SCREAMING_SNAKE_CASE_ , )
# Training
if training_args.do_train:
_SCREAMING_SNAKE_CASE = None
if training_args.resume_from_checkpoint is not None:
_SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_SCREAMING_SNAKE_CASE = last_checkpoint
_SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE_ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_SCREAMING_SNAKE_CASE = trainer.evaluate()
trainer.log_metrics("""eval""" , SCREAMING_SNAKE_CASE_ )
trainer.save_metrics("""eval""" , SCREAMING_SNAKE_CASE_ )
# Write model card and (optionally) push to hub
_SCREAMING_SNAKE_CASE = {
"""tasks""": """masked-auto-encoding""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-auto-encoding"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE_ )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> List[str]:
"""simple docstring"""
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 591 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __lowercase ( A, A, A, unittest.TestCase ):
'''simple docstring'''
_A : Optional[Any] = StableUnCLIPImgaImgPipeline
_A : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
_A : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_A : Any = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_A : int = frozenset([] )
def A_ ( self : int ):
UpperCamelCase__ = 32
UpperCamelCase__ = embedder_hidden_size
# image encoding components
UpperCamelCase__ = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
UpperCamelCase__ = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=_a , projection_dim=_a , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
UpperCamelCase__ = StableUnCLIPImageNormalizer(embedding_dim=_a )
UpperCamelCase__ = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
UpperCamelCase__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
UpperCamelCase__ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_a , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
UpperCamelCase__ = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_a , layers_per_block=1 , upcast_attention=_a , use_linear_projection=_a , )
torch.manual_seed(0 )
UpperCamelCase__ = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_0085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_a , steps_offset=1 , )
torch.manual_seed(0 )
UpperCamelCase__ = AutoencoderKL()
UpperCamelCase__ = {
# image encoding components
'''feature_extractor''': feature_extractor,
'''image_encoder''': image_encoder.eval(),
# image noising components
'''image_normalizer''': image_normalizer.eval(),
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder.eval(),
'''unet''': unet.eval(),
'''scheduler''': scheduler,
'''vae''': vae.eval(),
}
return components
def A_ ( self : Optional[int] , _a : int , _a : Union[str, Any]=0 , _a : List[str]=True ):
if str(_a ).startswith('''mps''' ):
UpperCamelCase__ = torch.manual_seed(_a )
else:
UpperCamelCase__ = torch.Generator(device=_a ).manual_seed(_a )
UpperCamelCase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a )
if pil_image:
UpperCamelCase__ = input_image * 0.5 + 0.5
UpperCamelCase__ = input_image.clamp(0 , 1 )
UpperCamelCase__ = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
UpperCamelCase__ = DiffusionPipeline.numpy_to_pil(_a )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def A_ ( self : Any ):
UpperCamelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ = self.get_dummy_components()
UpperCamelCase__ = StableUnCLIPImgaImgPipeline(**_a )
UpperCamelCase__ = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
UpperCamelCase__ = self.get_dummy_inputs(_a )
inputs.update({'''image_embeds''': None} )
UpperCamelCase__ = sd_pipe(**_a ).images
UpperCamelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCamelCase__ = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def A_ ( self : Any ):
UpperCamelCase__ = torch_device in ['''cpu''', '''mps''']
self._test_attention_slicing_forward_pass(test_max_difference=_a )
def A_ ( self : Tuple ):
UpperCamelCase__ = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_a )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def A_ ( self : List[str] ):
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=_a )
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self : Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : Union[str, Any] ):
UpperCamelCase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
UpperCamelCase__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' )
UpperCamelCase__ = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-l-img2img''' , torch_dtype=torch.floataa )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCamelCase__ = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCamelCase__ = pipe(_a , '''anime turle''' , generator=_a , output_type='''np''' )
UpperCamelCase__ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_a , _a )
def A_ ( self : Union[str, Any] ):
UpperCamelCase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
UpperCamelCase__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' )
UpperCamelCase__ = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCamelCase__ = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCamelCase__ = pipe(_a , '''anime turle''' , generator=_a , output_type='''np''' )
UpperCamelCase__ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_a , _a )
def A_ ( self : Optional[int] ):
UpperCamelCase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCamelCase__ = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa )
UpperCamelCase__ = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCamelCase__ = pipe(
_a , '''anime turtle''' , num_inference_steps=2 , output_type='''np''' , )
UpperCamelCase__ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 706 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowercase ( A ):
'''simple docstring'''
_A : List[Any] = ['''image_processor''', '''tokenizer''']
_A : List[str] = '''LayoutLMv3ImageProcessor'''
_A : Optional[int] = ('''LayoutLMv3Tokenizer''', '''LayoutLMv3TokenizerFast''')
def __init__( self : str , _a : Optional[Any]=None , _a : Optional[Any]=None , **_a : Optional[Any] ):
UpperCamelCase__ = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
UpperCamelCase__ = kwargs.pop('''feature_extractor''' )
UpperCamelCase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
def __call__( self : List[str] , _a : Any , _a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _a : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , _a : Union[List[List[int]], List[List[List[int]]]] = None , _a : Optional[Union[List[int], List[List[int]]]] = None , _a : bool = True , _a : Union[bool, str, PaddingStrategy] = False , _a : Union[bool, str, TruncationStrategy] = None , _a : Optional[int] = None , _a : int = 0 , _a : Optional[int] = None , _a : Optional[bool] = None , _a : Optional[bool] = None , _a : bool = False , _a : bool = False , _a : bool = False , _a : bool = False , _a : bool = True , _a : Optional[Union[str, TensorType]] = None , **_a : List[Any] , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
# first, apply the image processor
UpperCamelCase__ = self.image_processor(images=_a , return_tensors=_a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(text , str ):
text = [text] # add batch dimension (as the image processor always adds a batch dimension)
UpperCamelCase__ = features['''words''']
UpperCamelCase__ = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
# add pixel values
UpperCamelCase__ = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
UpperCamelCase__ = self.get_overflowing_images(_a , encoded_inputs['''overflow_to_sample_mapping'''] )
UpperCamelCase__ = images
return encoded_inputs
def A_ ( self : Optional[int] , _a : int , _a : Tuple ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
UpperCamelCase__ = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_a ) != len(_a ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
F""" {len(_a )} and {len(_a )}""" )
return images_with_overflow
def A_ ( self : List[Any] , *_a : List[Any] , **_a : Optional[Any] ):
return self.tokenizer.batch_decode(*_a , **_a )
def A_ ( self : int , *_a : str , **_a : str ):
return self.tokenizer.decode(*_a , **_a )
@property
def A_ ( self : Any ):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def A_ ( self : List[str] ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def A_ ( self : List[Any] ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
| 591 | 0 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
UpperCAmelCase = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = '''cpu'''
UpperCAmelCase = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
UpperCAmelCase = '''path-to-your-trained-model'''
UpperCAmelCase = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
UpperCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
UpperCAmelCase = pipe.to(device)
# to channels last
UpperCAmelCase = pipe.unet.to(memory_format=torch.channels_last)
UpperCAmelCase = pipe.vae.to(memory_format=torch.channels_last)
UpperCAmelCase = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
UpperCAmelCase = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
UpperCAmelCase = torch.randn(2, 4, 64, 64)
UpperCAmelCase = torch.rand(1) * 999
UpperCAmelCase = torch.randn(2, 77, 768)
UpperCAmelCase = (sample, timestep, encoder_hidden_status)
try:
UpperCAmelCase = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
UpperCAmelCase = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
UpperCAmelCase = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
UpperCAmelCase = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
UpperCAmelCase = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
UpperCAmelCase = 666
UpperCAmelCase = torch.Generator(device).manual_seed(seed)
UpperCAmelCase = {'''generator''': generator}
if args.steps is not None:
UpperCAmelCase = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
UpperCAmelCase = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
| 84 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : jnp.ndarray
_UpperCamelCase : jnp.ndarray
class A_ ( nn.Module ):
'''simple docstring'''
_UpperCamelCase : int
_UpperCamelCase : Tuple[int] = (16, 32, 96, 256)
_UpperCamelCase : jnp.dtype = jnp.floataa
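# small conv encoder that downsamples the raw conditioning image (via the stride-2 convs below) to the latent resolution, so it can be combined with the UNet's input features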
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowercase = []
for i in range(len(self.block_out_channels ) - 1 ):
lowercase = self.block_out_channels[i]
lowercase = self.block_out_channels[i + 1]
lowercase = nn.Conv(
snake_case , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(snake_case )
lowercase = nn.Conv(
snake_case , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(snake_case )
lowercase = blocks
lowercase = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , snake_case ):
lowercase = self.conv_in(snake_case )
lowercase = nn.silu(snake_case )
for block in self.blocks:
lowercase = block(snake_case )
lowercase = nn.silu(snake_case )
lowercase = self.conv_out(snake_case )
return embedding
@flax_register_to_config
class A_ ( nn.Module , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : int = 32
_UpperCamelCase : int = 4
_UpperCamelCase : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_UpperCamelCase : Union[bool, Tuple[bool]] = False
_UpperCamelCase : Tuple[int] = (320, 640, 1280, 1280)
_UpperCamelCase : int = 2
_UpperCamelCase : Union[int, Tuple[int]] = 8
_UpperCamelCase : Optional[Union[int, Tuple[int]]] = None
_UpperCamelCase : int = 1280
_UpperCamelCase : float = 0.0
_UpperCamelCase : bool = False
_UpperCamelCase : jnp.dtype = jnp.floataa
_UpperCamelCase : bool = True
_UpperCamelCase : int = 0
_UpperCamelCase : str = "rgb"
_UpperCamelCase : Tuple[int] = (16, 32, 96, 256)
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
# init input tensors
lowercase = (1, self.in_channels, self.sample_size, self.sample_size)
lowercase = jnp.zeros(snake_case , dtype=jnp.floataa )
lowercase = jnp.ones((1,) , dtype=jnp.intaa )
lowercase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowercase = (1, 3, self.sample_size * 8, self.sample_size * 8)
lowercase = jnp.zeros(snake_case , dtype=jnp.floataa )
lowercase , lowercase = jax.random.split(snake_case )
lowercase = {'params': params_rng, 'dropout': dropout_rng}
return self.init(snake_case , snake_case , snake_case , snake_case , snake_case )["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in
        # https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too
        # backwards breaking, which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)

            # one zero conv per resnet layer, plus one for each downsampler
            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
| 84 | 1 |
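A short usage sketch for the Flax ControlNet above (not part of the original file; shapes follow init_weights, and the 77-token text length is an assumption): initialize parameters with a PRNG key, then apply the module to NCHW inputs. Because every controlnet projection is zero-initialized, a freshly initialized model contributes nothing to the UNet, so fine-tuning starts from the base model's behavior.

import jax
import jax.numpy as jnp

controlnet = FlaxControlNetModel(sample_size=32)
params = controlnet.init_weights(jax.random.PRNGKey(0))

sample = jnp.zeros((1, 4, 32, 32), dtype=jnp.float32)                 # noisy latents (NCHW)
timesteps = jnp.ones((1,), dtype=jnp.int32)                           # one timestep per batch item
encoder_hidden_states = jnp.zeros((1, 77, 1280), dtype=jnp.float32)   # text embeddings (77 tokens assumed)
controlnet_cond = jnp.zeros((1, 3, 256, 256), dtype=jnp.float32)      # conditioning image, 8x the latent size

out = controlnet.apply({"params": params}, sample, timesteps, encoder_hidden_states, controlnet_cond)
down_res, mid_res = out.down_block_res_samples, out.mid_block_res_sample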
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch

from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch


@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_path):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_path)

    def _setup_tf_ckpt(self, save_path):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_path)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
| 178 |
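Outside the test harness, the same resolution logic can be exercised directly. A brief sketch (the model identifier is illustrative, and the printed results depend on which frameworks are installed):

from transformers.onnx import FeaturesManager

# An explicitly provided framework always wins.
print(FeaturesManager.determine_framework("bert-base-uncased", "tf"))  # -> "tf"

# Otherwise the framework is inferred from a local checkpoint or from
# whichever of torch/tensorflow is importable, preferring PyTorch.
print(FeaturesManager.determine_framework("bert-base-uncased"))  # -> "pt" if torch is installed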
import inspect
import unittest
from math import floor

from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import CvtForImageClassification, CvtModel
    from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class CvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        # each stage shrinks the spatial size according to its patch embedding parameters
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 178 | 1 |
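The inference path the integration test exercises can also be run standalone. A minimal sketch, assuming the "microsoft/cvt-13" checkpoint (illustrative; the test pulls whatever is first in the archive list):

import torch
from PIL import Image
from transformers import AutoImageProcessor, CvtForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/cvt-13")  # assumed checkpoint
model = CvtForImageClassification.from_pretrained("microsoft/cvt-13")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
# map the argmax logit back to a human-readable ImageNet label
print(model.config.id2label[logits.argmax(-1).item()])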