code
stringlengths 86
54.5k
| code_codestyle
int64 0
371
| style_context
stringlengths 87
49.2k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE):
'''simple docstring'''
__UpperCamelCase : List[str] = 42
__UpperCamelCase : Optional[int] = 42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 351
|
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def a ( SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
return "".join(sorted(SCREAMING_SNAKE_CASE_ ) )
def a ( SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
return word_by_signature[signature(SCREAMING_SNAKE_CASE_ )]
__UpperCAmelCase : str = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
__UpperCAmelCase : Tuple = sorted({word.strip().lower() for word in data.splitlines()})
__UpperCAmelCase : Union[str, Any] = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
__UpperCAmelCase : int = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("anagrams.txt", "w") as file:
file.write("all_anagrams = \n ")
file.write(pprint.pformat(all_anagrams))
| 315
| 0
|
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class UpperCAmelCase_ ( __lowercase):
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = CustomTokenizer
pass
| 352
|
def a ( SCREAMING_SNAKE_CASE_ : list[list[float]] ):
"""simple docstring"""
UpperCamelCase : list[list[float]] = []
for data in source_data:
for i, el in enumerate(SCREAMING_SNAKE_CASE_ ):
if len(SCREAMING_SNAKE_CASE_ ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(SCREAMING_SNAKE_CASE_ ) )
return data_lists
def a ( SCREAMING_SNAKE_CASE_ : list[list[float]] , SCREAMING_SNAKE_CASE_ : list[int] ):
"""simple docstring"""
UpperCamelCase : list[list[float]] = []
for dlist, weight in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Union[str, Any] = min(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = max(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : list[float] = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
UpperCamelCase : Dict = F"""Invalid weight of {weight:f} provided"""
raise ValueError(SCREAMING_SNAKE_CASE_ )
score_lists.append(SCREAMING_SNAKE_CASE_ )
return score_lists
def a ( SCREAMING_SNAKE_CASE_ : list[list[float]] ):
"""simple docstring"""
UpperCamelCase : list[float] = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : str = final_scores[j] + ele
return final_scores
def a ( SCREAMING_SNAKE_CASE_ : list[list[float]] , SCREAMING_SNAKE_CASE_ : list[int] ):
"""simple docstring"""
UpperCamelCase : str = get_data(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = calculate_each_score(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = generate_final_scores(SCREAMING_SNAKE_CASE_ )
# append scores to source data
for i, ele in enumerate(SCREAMING_SNAKE_CASE_ ):
source_data[i].append(SCREAMING_SNAKE_CASE_ )
return source_data
| 315
| 0
|
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__UpperCAmelCase : str = {
"""/attention/""": """/0/SelfAttention/""",
"""/self_attention/""": """/0/SelfAttention/""",
"""/encoder_decoder_attention/""": """/1/EncDecAttention/""",
"""value""": """v""",
"""query""": """q""",
"""key""": """k""",
"""out""": """o""",
"""pre_self_attention_layer_norm""": """0/layer_norm""",
"""pre_cross_attention_layer_norm""": """1/layer_norm""",
"""pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong
"""token_embedder""": """shared""",
"""encoder_norm""": """final_layer_norm""",
"""decoder_norm""": """final_layer_norm""",
"""relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""",
"""router/router_weights/w/""": """router/classifier/""",
"""roer/roer_weights/w/""": """router/classifier/""",
"""logits_dense""": """lm_head""",
}
def a ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
UpperCamelCase : Any = list(s_dict.keys() )
for key in keys:
UpperCamelCase : Optional[Any] = R'''.*/layers_(\d+)'''
UpperCamelCase : List[str] = key
if re.match(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : int = re.sub(R'''layers_(\d+)''' , R'''block/\1/layer''' , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = R'''(encoder|decoder)\/'''
if re.match(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : int = re.match(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).groups()
if groups[0] == "encoder":
UpperCamelCase : List[str] = re.sub(R'''/mlp/''' , R'''/1/mlp/''' , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/1/layer_norm/''' , SCREAMING_SNAKE_CASE_ )
elif groups[0] == "decoder":
UpperCamelCase : Union[str, Any] = re.sub(R'''/mlp/''' , R'''/2/mlp/''' , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/2/layer_norm/''' , SCREAMING_SNAKE_CASE_ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
UpperCamelCase : Any = new_key.replace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
print(F"""{key} -> {new_key}""" )
UpperCamelCase : List[str] = s_dict.pop(SCREAMING_SNAKE_CASE_ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
UpperCamelCase : str = s_dict[
'''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
UpperCamelCase : int = s_dict[
'''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
UpperCamelCase : Optional[Any] = s_dict[key].shape[0]
UpperCamelCase : Any = s_dict[key]
for idx in range(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : List[str] = expert_weihts[idx]
print(F"""{key} -> {key.replace("expert/" , "nested fstring" )}""" )
s_dict.pop(SCREAMING_SNAKE_CASE_ )
return s_dict
__UpperCAmelCase : Optional[Any] = {
"""NUM_ENCODER_LAYERS""": """num_layers""",
"""NUM_DECODER_LAYERS""": """num_decoder_layers""",
"""NUM_HEADS""": """num_heads""",
"""HEAD_DIM""": """d_kv""",
"""EMBED_DIM""": """d_model""",
"""MLP_DIM""": """d_ff""",
"""NUM_SELECTED_EXPERTS""": """num_selected_experts""",
"""NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""",
"""NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""",
"""dense.MlpBlock.activations""": """feed_forward_proj""",
}
def a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] ):
"""simple docstring"""
import regex as re
with open(SCREAMING_SNAKE_CASE_ , '''r''' ) as f:
UpperCamelCase : Union[str, Any] = f.read()
UpperCamelCase : str = re.findall(R'''(.*) = ([0-9.]*)''' , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
UpperCamelCase : Optional[int] = float(SCREAMING_SNAKE_CASE_ ) if '''.''' in value else int(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = re.findall(R'''(.*activations) = \(\'(.*)\',\)''' , SCREAMING_SNAKE_CASE_ )[0]
UpperCamelCase : Optional[Any] = str(activation[1] )
UpperCamelCase : List[str] = num_experts
UpperCamelCase : Optional[Any] = SwitchTransformersConfig(**SCREAMING_SNAKE_CASE_ )
return config
def a ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any=None , SCREAMING_SNAKE_CASE_ : List[str]="./" , SCREAMING_SNAKE_CASE_ : str=8 ):
"""simple docstring"""
print(F"""Loading flax weights from : {flax_checkpoint_path}""" )
UpperCamelCase : int = checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE_ )
if gin_file is not None:
UpperCamelCase : Any = convert_gin_to_config(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase : Dict = SwitchTransformersConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = SwitchTransformersForConditionalGeneration(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = flax_params['''target''']
UpperCamelCase : List[Any] = flatten_dict(SCREAMING_SNAKE_CASE_ , sep='''/''' )
UpperCamelCase : str = rename_keys(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = unflatten_dict(SCREAMING_SNAKE_CASE_ , sep='''/''' )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
pt_model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
__UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
__UpperCAmelCase : str = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 353
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__UpperCAmelCase : Optional[int] = ""
__UpperCAmelCase : Union[str, Any] = ""
__UpperCAmelCase : Optional[int] = ""
__UpperCAmelCase : Any = 1 # (0 is vertical, 1 is horizontal)
def a ( ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : List[Any] = get_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
print('''Processing...''' )
UpperCamelCase , UpperCamelCase , UpperCamelCase : Any = update_image_and_anno(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for index, image in enumerate(SCREAMING_SNAKE_CASE_ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
UpperCamelCase : Optional[int] = random_chars(3_2 )
UpperCamelCase : List[Any] = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
UpperCamelCase : int = F"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(F"""/{file_root}.jpg""" , SCREAMING_SNAKE_CASE_ , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
print(F"""Success {index+1}/{len(SCREAMING_SNAKE_CASE_ )} with {file_name}""" )
UpperCamelCase : Any = []
for anno in new_annos[index]:
UpperCamelCase : Tuple = F"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(SCREAMING_SNAKE_CASE_ )
with open(F"""/{file_root}.txt""" , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
UpperCamelCase : Any = []
UpperCamelCase : Union[str, Any] = []
for label_file in glob.glob(os.path.join(SCREAMING_SNAKE_CASE_ , '''*.txt''' ) ):
UpperCamelCase : Optional[Any] = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(SCREAMING_SNAKE_CASE_ ) as in_file:
UpperCamelCase : List[str] = in_file.readlines()
UpperCamelCase : Tuple = os.path.join(SCREAMING_SNAKE_CASE_ , F"""{label_name}.jpg""" )
UpperCamelCase : Union[str, Any] = []
for obj_list in obj_lists:
UpperCamelCase : str = obj_list.rstrip('''\n''' ).split(''' ''' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(SCREAMING_SNAKE_CASE_ )
labels.append(SCREAMING_SNAKE_CASE_ )
return img_paths, labels
def a ( SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : int = 1 ):
"""simple docstring"""
UpperCamelCase : List[Any] = []
UpperCamelCase : str = []
UpperCamelCase : int = []
for idx in range(len(SCREAMING_SNAKE_CASE_ ) ):
UpperCamelCase : Tuple = []
UpperCamelCase : Optional[int] = img_list[idx]
path_list.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = anno_list[idx]
UpperCamelCase : Optional[Any] = cva.imread(SCREAMING_SNAKE_CASE_ )
if flip_type == 1:
UpperCamelCase : Optional[Any] = cva.flip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for bbox in img_annos:
UpperCamelCase : Optional[Any] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
UpperCamelCase : List[str] = cva.flip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for bbox in img_annos:
UpperCamelCase : Union[str, Any] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(SCREAMING_SNAKE_CASE_ )
new_imgs_list.append(SCREAMING_SNAKE_CASE_ )
return new_imgs_list, new_annos_lists, path_list
def a ( SCREAMING_SNAKE_CASE_ : int = 3_2 ):
"""simple docstring"""
assert number_char > 1, "The number of character should greater than 1"
UpperCamelCase : Any = ascii_lowercase + digits
return "".join(random.choice(SCREAMING_SNAKE_CASE_ ) for _ in range(SCREAMING_SNAKE_CASE_ ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 315
| 0
|
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
__UpperCAmelCase : int = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
__UpperCAmelCase : int = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple=False ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = create_model(
'''HTSAT-tiny''' , '''roberta''' , SCREAMING_SNAKE_CASE_ , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=SCREAMING_SNAKE_CASE_ , fusion_type='''aff_2d''' if enable_fusion else None , )
return model, model_cfg
def a ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
UpperCamelCase = {}
UpperCamelCase = R'''.*sequential.(\d+).*'''
UpperCamelCase = R'''.*_projection.(\d+).*'''
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
UpperCamelCase = key.replace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if re.match(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
# replace sequential layers with list
UpperCamelCase = re.match(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).group(1 )
UpperCamelCase = key.replace(F"""sequential.{sequential_layer}.""" , F"""layers.{int(SCREAMING_SNAKE_CASE_ )//3}.linear.""" )
elif re.match(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase = int(re.match(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
UpperCamelCase = 1 if projecton_layer == 0 else 2
UpperCamelCase = key.replace(F"""_projection.{projecton_layer}.""" , F"""_projection.linear{transformers_projection_layer}.""" )
if "audio" and "qkv" in key:
# split qkv into query key and value
UpperCamelCase = value
UpperCamelCase = mixed_qkv.size(0 ) // 3
UpperCamelCase = mixed_qkv[:qkv_dim]
UpperCamelCase = mixed_qkv[qkv_dim : qkv_dim * 2]
UpperCamelCase = mixed_qkv[qkv_dim * 2 :]
UpperCamelCase = query_layer
UpperCamelCase = key_layer
UpperCamelCase = value_layer
else:
UpperCamelCase = value
return model_state_dict
def a ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Any=False ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = init_clap(SCREAMING_SNAKE_CASE_ , enable_fusion=SCREAMING_SNAKE_CASE_ )
clap_model.eval()
UpperCamelCase = clap_model.state_dict()
UpperCamelCase = rename_state_dict(SCREAMING_SNAKE_CASE_ )
UpperCamelCase = ClapConfig()
UpperCamelCase = enable_fusion
UpperCamelCase = ClapModel(SCREAMING_SNAKE_CASE_ )
# ignore the spectrogram embedding layer
model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
transformers_config.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
__UpperCAmelCase : str = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
__UpperCAmelCase : int = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 354
|
import qiskit
def a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
UpperCamelCase : List[str] = qiskit.Aer.get_backend('''aer_simulator''' )
UpperCamelCase : Any = qiskit.QuantumCircuit(4 , 2 )
# encode inputs in qubits 0 and 1
if bita == 1:
qc_ha.x(0 )
if bita == 1:
qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
UpperCamelCase : Any = qiskit.execute(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , shots=1_0_0_0 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
__UpperCAmelCase : int = half_adder(1, 1)
print(f'''Half Adder Output Qubit Counts: {counts}''')
| 315
| 0
|
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def a ( ):
UpperCamelCase : Dict = ArgumentParser('''Transformers CLI tool''' , usage='''transformers-cli <command> [<args>]''' )
UpperCamelCase : List[Any] = parser.add_subparsers(help='''transformers-cli command helpers''' )
# Register commands
ConvertCommand.register_subcommand(SCREAMING_SNAKE_CASE_ )
DownloadCommand.register_subcommand(SCREAMING_SNAKE_CASE_ )
EnvironmentCommand.register_subcommand(SCREAMING_SNAKE_CASE_ )
RunCommand.register_subcommand(SCREAMING_SNAKE_CASE_ )
ServeCommand.register_subcommand(SCREAMING_SNAKE_CASE_ )
UserCommands.register_subcommand(SCREAMING_SNAKE_CASE_ )
AddNewModelCommand.register_subcommand(SCREAMING_SNAKE_CASE_ )
AddNewModelLikeCommand.register_subcommand(SCREAMING_SNAKE_CASE_ )
LfsCommands.register_subcommand(SCREAMING_SNAKE_CASE_ )
PTtoTFCommand.register_subcommand(SCREAMING_SNAKE_CASE_ )
# Let's go
UpperCamelCase : Union[str, Any] = parser.parse_args()
if not hasattr(SCREAMING_SNAKE_CASE_ , '''func''' ):
parser.print_help()
exit(1 )
# Run
UpperCamelCase : str = args.func(SCREAMING_SNAKE_CASE_ )
service.run()
if __name__ == "__main__":
main()
| 355
|
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__UpperCAmelCase : str = logging.get_logger(__name__)
def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = nn.functional.normalize(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = nn.functional.normalize(SCREAMING_SNAKE_CASE_ )
return torch.mm(SCREAMING_SNAKE_CASE_ , normalized_text_embeds.t() )
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : List[str] = CLIPConfig
__UpperCamelCase : Optional[int] = ["CLIPEncoderLayer"]
def __init__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = CLIPVisionModel(config.vision_config )
UpperCamelCase : List[str] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = nn.Parameter(torch.ones(17 ) , requires_grad=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = nn.Parameter(torch.ones(3 ) , requires_grad=__SCREAMING_SNAKE_CASE )
@torch.no_grad()
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Tuple = self.vision_model(__SCREAMING_SNAKE_CASE )[1] # pooled_output
UpperCamelCase : Union[str, Any] = self.visual_projection(__SCREAMING_SNAKE_CASE )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCamelCase : Optional[int] = cosine_distance(__SCREAMING_SNAKE_CASE , self.special_care_embeds ).cpu().float().numpy()
UpperCamelCase : List[Any] = cosine_distance(__SCREAMING_SNAKE_CASE , self.concept_embeds ).cpu().float().numpy()
UpperCamelCase : Dict = []
UpperCamelCase : List[str] = image_embeds.shape[0]
for i in range(__SCREAMING_SNAKE_CASE ):
UpperCamelCase : Optional[Any] = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []}
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign images
UpperCamelCase : Optional[int] = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
UpperCamelCase : List[str] = special_cos_dist[i][concept_idx]
UpperCamelCase : Optional[Any] = self.special_care_embeds_weights[concept_idx].item()
UpperCamelCase : Any = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]} )
UpperCamelCase : Optional[int] = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
UpperCamelCase : Optional[int] = cos_dist[i][concept_idx]
UpperCamelCase : List[str] = self.concept_embeds_weights[concept_idx].item()
UpperCamelCase : Any = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(__SCREAMING_SNAKE_CASE )
result.append(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = [len(res['''bad_concepts'''] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = self.vision_model(__SCREAMING_SNAKE_CASE )[1] # pooled_output
UpperCamelCase : int = self.visual_projection(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = cosine_distance(__SCREAMING_SNAKE_CASE , self.special_care_embeds )
UpperCamelCase : str = cosine_distance(__SCREAMING_SNAKE_CASE , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
UpperCamelCase : Union[str, Any] = 0.0
UpperCamelCase : Optional[Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
UpperCamelCase : Optional[Any] = torch.any(special_scores > 0 , dim=1 )
UpperCamelCase : int = special_care * 0.01
UpperCamelCase : Tuple = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
UpperCamelCase : Optional[int] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
UpperCamelCase : List[str] = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
| 315
| 0
|
from pathlib import Path
import numpy as np
from PIL import Image
def a ( SCREAMING_SNAKE_CASE_ : np.ndarray ):
"""simple docstring"""
UpperCamelCase : Any = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.2989 * r + 0.5870 * g + 0.1140 * b
def a ( SCREAMING_SNAKE_CASE_ : np.ndarray ):
"""simple docstring"""
return (gray > 1_2_7) & (gray <= 2_5_5)
def a ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : np.ndarray ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = np.zeros_like(lowerCamelCase_ )
UpperCamelCase : Optional[Any] = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
UpperCamelCase : Optional[Any] = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
UpperCamelCase : List[str] = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
UpperCamelCase : Optional[int] = int(summation > 0 )
return output
if __name__ == "__main__":
# read original image
__UpperCAmelCase : List[Any] = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg'
__UpperCAmelCase : Optional[Any] = np.array(Image.open(lena_path))
# kernel to be applied
__UpperCAmelCase : Union[str, Any] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
__UpperCAmelCase : Optional[Any] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
__UpperCAmelCase : Optional[int] = Image.fromarray(output).convert("RGB")
pil_img.save("result_dilation.png")
| 356
|
from argparse import ArgumentParser
from .env import EnvironmentCommand
def a ( ):
"""simple docstring"""
UpperCamelCase : Any = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
UpperCamelCase : Tuple = parser.add_subparsers(help='''diffusers-cli command helpers''' )
# Register commands
EnvironmentCommand.register_subcommand(SCREAMING_SNAKE_CASE_ )
# Let's go
UpperCamelCase : List[Any] = parser.parse_args()
if not hasattr(SCREAMING_SNAKE_CASE_ , '''func''' ):
parser.print_help()
exit(1 )
# Run
UpperCamelCase : str = args.func(SCREAMING_SNAKE_CASE_ )
service.run()
if __name__ == "__main__":
main()
| 315
| 0
|
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class UpperCAmelCase_ :
'''simple docstring'''
__UpperCamelCase : str = LEDConfig
__UpperCamelCase : List[Any] = {}
__UpperCamelCase : List[str] = """gelu"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=20 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=4 , ):
"""simple docstring"""
UpperCamelCase : Optional[int] = parent
UpperCamelCase : int = batch_size
UpperCamelCase : Any = seq_length
UpperCamelCase : Optional[int] = is_training
UpperCamelCase : Union[str, Any] = use_labels
UpperCamelCase : List[str] = vocab_size
UpperCamelCase : Union[str, Any] = hidden_size
UpperCamelCase : List[Any] = num_hidden_layers
UpperCamelCase : List[str] = num_attention_heads
UpperCamelCase : Any = intermediate_size
UpperCamelCase : Tuple = hidden_dropout_prob
UpperCamelCase : str = attention_probs_dropout_prob
UpperCamelCase : str = max_position_embeddings
UpperCamelCase : List[Any] = eos_token_id
UpperCamelCase : Optional[Any] = pad_token_id
UpperCamelCase : List[str] = bos_token_id
UpperCamelCase : Tuple = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
UpperCamelCase : List[Any] = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
UpperCamelCase : List[Any] = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCamelCase : Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCamelCase : List[str] = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase : Optional[int] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(_A , _A , _A )
UpperCamelCase : str = tf.concat(
[tf.zeros_like(_A )[:, :-1], tf.ones_like(_A )[:, -1:]] , axis=-1 , )
UpperCamelCase : List[Any] = global_attention_mask
return config, inputs_dict
    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
        """Check the decoder cache: a forward pass using ``past_key_values``
        must reproduce the no-cache forward pass on the appended tokens.

        NOTE(review): the two parameters share one placeholder name (a
        SyntaxError as written) and ``_A`` is undefined — both are artifacts
        of an automated identifier rewrite; restore ``config, inputs_dict``
        (and the derived locals) to run.
        """
        UpperCamelCase : int = TFLEDModel(config=_A ).get_decoder()
        UpperCamelCase : Optional[int] = inputs_dict['input_ids']
        # Keep a single batch row to speed the check up.
        UpperCamelCase : int = input_ids[:1, :]
        UpperCamelCase : Dict = inputs_dict['attention_mask'][:1, :]
        UpperCamelCase : Union[str, Any] = 1
        # first forward pass
        UpperCamelCase : Union[str, Any] = model(_A , attention_mask=_A , use_cache=_A )
        UpperCamelCase : List[Any] = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        UpperCamelCase : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        UpperCamelCase : Tuple = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        UpperCamelCase : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
        UpperCamelCase : List[str] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        # Full recompute vs. cached continuation — the two must agree.
        UpperCamelCase : int = model(_A , attention_mask=_A )[0]
        UpperCamelCase : Optional[int] = model(_A , attention_mask=_A , past_key_values=_A )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        UpperCamelCase : Optional[int] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        UpperCamelCase : int = output_from_no_past[:, -3:, random_slice_idx]
        UpperCamelCase : Optional[Any] = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(_A , _A , rtol=1e-3 )
def a ( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None ):
    """Build the standard input dict used by the TF-LED tests.

    Any mask not supplied is derived from the token ids: positions equal to
    ``config.pad_token_id`` are masked out (0), everything else attends (1).
    The first decoder position is always attendable, since it may legitimately
    hold the (pad-valued) decoder start token.

    Fixes applied (review): the original definition repeated one placeholder
    name for every parameter (a SyntaxError), referenced the undefined names
    ``UpperCAmelCase_`` / ``attention_mask`` locals, and used the nonexistent
    dtype ``tf.inta``; the canonical parameter names, assignments and
    ``tf.int8`` dtype from the upstream helper were restored.

    :param config: model config providing ``pad_token_id`` and layer/head counts
    :param input_ids: encoder token ids, shape (batch, src_len)
    :param decoder_input_ids: decoder token ids, shape (batch, tgt_len)
    :returns: dict with ids, masks and head masks, keyed for ``model(**inputs)``
    """
    if attention_mask is None:
        # 1 everywhere except padding positions.
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                # First decoder position always attends (decoder start token).
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        # Keep every encoder attention head active.
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class UpperCAmelCase_ ( _a, _a, unittest.TestCase):
    """Common + pipeline test suite for the TF-LED model family.

    NOTE(review): the base classes ``_a, _a`` are undefined in this view —
    presumably corrupted names for the TF model/pipeline tester mixins; the
    ``_A`` references in the methods below are likewise corrupted locals
    (config, inputs_dict, model outputs, booleans). Restore them to run.
    """

    # Model classes exercised by the shared tests (seq2seq LM + bare model).
    __UpperCamelCase : Optional[Any] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    # Classes that support generation.
    __UpperCamelCase : List[Any] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    # Pipeline-task-name -> class mapping used by the pipeline mixin.
    # NOTE(review): every class attribute here shares the same (name-mangled)
    # identifier, so only the last assignment of each run survives — corrupted.
    __UpperCamelCase : Tuple = (
        {
            """conversational""": TFLEDForConditionalGeneration,
            """feature-extraction""": TFLEDModel,
            """summarization""": TFLEDForConditionalGeneration,
            """text2text-generation""": TFLEDForConditionalGeneration,
            """translation""": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    __UpperCamelCase : Union[str, Any] = True
    __UpperCamelCase : int = False
    __UpperCamelCase : Dict = False
    __UpperCamelCase : Any = False

    def _lowercase ( self ):
        """Set up the model tester and the config tester."""
        UpperCamelCase : Tuple = TFLEDModelTester(self )
        UpperCamelCase : Dict = ConfigTester(self , config_class=_A )

    def _lowercase ( self ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def _lowercase ( self ):
        """Check decoder caching against a full recompute."""
        UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*_A )

    def _lowercase ( self ):
        """Check shapes of local and global attention outputs for all model classes."""
        UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase : Optional[int] = tf.zeros_like(inputs_dict['''attention_mask'''] )
        # Mark the first two positions of every sequence as global attention.
        UpperCamelCase : List[Any] = 2
        UpperCamelCase : Any = tf.where(
            tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
        UpperCamelCase : List[Any] = True
        UpperCamelCase : List[Any] = self.model_tester.seq_length
        UpperCamelCase : Tuple = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(__SCREAMING_SNAKE_CASE ):
            # Decoder attention is full (seq_length x seq_length) per head.
            UpperCamelCase : int = outputs.decoder_attentions
            self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )

        def check_encoder_attentions_output(__SCREAMING_SNAKE_CASE ):
            # Encoder returns both local-window and global attention tensors.
            UpperCamelCase : Optional[Any] = [t.numpy() for t in outputs.encoder_attentions]
            UpperCamelCase : Tuple = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
            self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )

        for model_class in self.all_model_classes:
            UpperCamelCase : Any = True
            UpperCamelCase : Union[str, Any] = False
            UpperCamelCase : int = False
            UpperCamelCase : Union[str, Any] = model_class(_A )
            UpperCamelCase : int = model(self._prepare_for_class(_A , _A ) )
            UpperCamelCase : Optional[Any] = len(_A )
            self.assertEqual(config.output_hidden_states , _A )
            check_encoder_attentions_output(_A )
            if self.is_encoder_decoder:
                UpperCamelCase : List[str] = model_class(_A )
                UpperCamelCase : Optional[int] = model(self._prepare_for_class(_A , _A ) )
                self.assertEqual(config.output_hidden_states , _A )
                check_decoder_attentions_output(_A )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            UpperCamelCase : Tuple = True
            UpperCamelCase : Optional[Any] = model_class(_A )
            UpperCamelCase : Tuple = model(self._prepare_for_class(_A , _A ) )
            self.assertEqual(config.output_hidden_states , _A )
            check_encoder_attentions_output(_A )
            # Check attention is always last and order is fine
            UpperCamelCase : List[Any] = True
            UpperCamelCase : str = True
            UpperCamelCase : Optional[int] = model_class(_A )
            UpperCamelCase : Optional[Any] = model(self._prepare_for_class(_A , _A ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_A ) )
            self.assertEqual(model.config.output_hidden_states , _A )
            check_encoder_attentions_output(_A )

    @unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
    def _lowercase ( self ):
        """Skipped: saved-model tracing is not supported for LED."""
        pass

    def _lowercase ( self ):
        """Intentionally empty: the corresponding common test is not applicable."""
        pass
def a ( SCREAMING_SNAKE_CASE_ : Tuple ):
    """Wrap a (nested) Python list of token ids in a ``tf.int32`` constant tensor.

    Fixes applied (review): the original body referenced the undefined name
    ``UpperCAmelCase_`` instead of the parameter, and the nonexistent dtype
    ``tf.intaa``; restored to the canonical ``_long_tensor`` helper from the
    upstream test file.
    """
    return tf.constant(SCREAMING_SNAKE_CASE_ , dtype=tf.int32 )
__UpperCAmelCase : Optional[int] = 1E-4
@slow
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
    """Slow integration checks against the pretrained allenai/led-base-16384 weights.

    NOTE(review): the ``_A`` references below are corrupted identifiers
    (presumably the input/decoder ids, the built inputs dict, the expected
    shape and the expected slice); restore them to run.
    """

    def _lowercase ( self ):
        """Forward pass through the bare LED model; compare a hidden-state slice."""
        UpperCamelCase : Any = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
        # change to intended input here
        UpperCamelCase : List[str] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        UpperCamelCase : Tuple = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        UpperCamelCase : Tuple = prepare_led_inputs_dict(model.config , _A , _A )
        UpperCamelCase : List[str] = model(**_A )[0]
        UpperCamelCase : Optional[int] = (1, 1_024, 768)
        self.assertEqual(output.shape , _A )
        # change to expected output here
        UpperCamelCase : Any = tf.convert_to_tensor(
            [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
        tf.debugging.assert_near(output[:, :3, :3] , _A , atol=1e-3 )

    def _lowercase ( self ):
        """Forward pass with the LM head; compare a logits slice."""
        UpperCamelCase : Union[str, Any] = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
        # change to intended input here
        UpperCamelCase : str = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        UpperCamelCase : Dict = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        UpperCamelCase : List[Any] = prepare_led_inputs_dict(model.config , _A , _A )
        UpperCamelCase : int = model(**_A )[0]
        UpperCamelCase : Any = (1, 1_024, model.config.vocab_size)
        self.assertEqual(output.shape , _A )
        # change to expected output here
        UpperCamelCase : Dict = tf.convert_to_tensor(
            [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
        tf.debugging.assert_near(output[:, :3, :3] , _A , atol=1e-3 , rtol=1e-3 )
| 357
|
def a ( SCREAMING_SNAKE_CASE_ : str ) -> str:
    """Return the input string with ASCII lowercase letters upper-cased.

    Characters outside ``'a'..'z'`` (digits, punctuation, already-uppercase
    letters) pass through unchanged.

    Bugs fixed (review): the original called ``ord`` on the whole input string
    instead of the per-character loop variable (TypeError for any input longer
    than one character) and iterated the undefined name ``word`` instead of
    its parameter.
    """
    # ord('a') - ord('A') == 32, so subtracting 32 upper-cases an ASCII letter.
    return "".join(chr(ord(char ) - 3_2 ) if '''a''' <= char <= '''z''' else char for char in SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    from doctest import testmod

    testmod()
| 315
| 0
|
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class UpperCAmelCase_ ( lowerCamelCase__):
    """Value-guided planning pipeline: samples state-action trajectories with a
    diffusion model, nudging each denoising step along the gradient of a
    learned value function, then executes the first action of the best-valued
    trajectory.

    NOTE(review): this block is riddled with rewritten identifiers — the base
    class ``lowerCamelCase__`` (presumably the imported DiffusionPipeline),
    the duplicated ``__SCREAMING_SNAKE_CASE`` parameters (a SyntaxError as
    written), the ``__snake_case`` call arguments and the ``UpperCamelCase``
    assignment targets all stand in for the original names (value_function,
    unet, scheduler, env, x, conditions, ...). Restore them before running.
    """

    def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ):
        """Store the components and precompute per-key mean/std over the env dataset."""
        super().__init__()
        UpperCamelCase : List[Any] = value_function
        UpperCamelCase : Tuple = unet
        UpperCamelCase : int = scheduler
        UpperCamelCase : Tuple = env
        UpperCamelCase : int = env.get_dataset()
        UpperCamelCase : Dict = {}
        for key in self.data.keys():
            try:
                UpperCamelCase : Dict = self.data[key].mean()
            except: # noqa: E722
                # non-numeric dataset entries simply get no statistics
                pass
        UpperCamelCase : int = {}
        for key in self.data.keys():
            try:
                UpperCamelCase : Dict = self.data[key].std()
            except: # noqa: E722
                pass
        # Trajectory layout is [state | action] along the last dimension.
        UpperCamelCase : str = env.observation_space.shape[0]
        UpperCamelCase : List[Any] = env.action_space.shape[0]

    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
        """Normalise the input with the dataset statistics stored for ``key``."""
        return (x_in - self.means[key]) / self.stds[key]

    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
        """Invert the normalisation for ``key``."""
        return x_in * self.stds[key] + self.means[key]

    def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
        """Recursively convert dicts / arrays / tensors onto the unet's device."""
        if type(__snake_case ) is dict:
            return {k: self.to_torch(__snake_case ) for k, v in x_in.items()}
        elif torch.is_tensor(__snake_case ):
            return x_in.to(self.unet.device )
        return torch.tensor(__snake_case , device=self.unet.device )

    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
        """Overwrite the conditioned positions of the trajectory with their fixed values."""
        for key, val in cond.items():
            UpperCamelCase : List[str] = val.clone()
        return x_in

    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
        """Denoise ``x`` with value-function guidance; return (trajectories, values)."""
        UpperCamelCase : Optional[Any] = x.shape[0]
        UpperCamelCase : List[str] = None
        for i in tqdm.tqdm(self.scheduler.timesteps ):
            # create batch of timesteps to pass into model
            UpperCamelCase : Any = torch.full((batch_size,) , __snake_case , device=self.unet.device , dtype=torch.long )
            for _ in range(__snake_case ):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    UpperCamelCase : str = self.value_function(x.permute(0 , 2 , 1 ) , __snake_case ).sample
                    UpperCamelCase : int = torch.autograd.grad([y.sum()] , [x] )[0]
                    # Scale the gradient by the model's posterior std at this step.
                    UpperCamelCase : Dict = self.scheduler._get_variance(__snake_case )
                    UpperCamelCase : int = torch.exp(0.5 * posterior_variance )
                    UpperCamelCase : Optional[Any] = model_std * grad
                UpperCamelCase : Dict = 0
                UpperCamelCase : Any = x.detach()
                # Ascend the value gradient, then re-pin the conditioned entries.
                UpperCamelCase : Optional[int] = x + scale * grad
                UpperCamelCase : List[str] = self.reset_xa(__snake_case , __snake_case , self.action_dim )
            UpperCamelCase : Union[str, Any] = self.unet(x.permute(0 , 2 , 1 ) , __snake_case ).sample.permute(0 , 2 , 1 )
            # TODO: verify deprecation of this kwarg
            UpperCamelCase : Any = self.scheduler.step(__snake_case , __snake_case , __snake_case , predict_epsilon=__snake_case )['prev_sample']
            # apply conditions to the trajectory (set the initial state)
            UpperCamelCase : Any = self.reset_xa(__snake_case , __snake_case , self.action_dim )
            UpperCamelCase : List[Any] = self.to_torch(__snake_case )
        return x, y

    def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.1 ):
        """Plan from the given observation and return the chosen de-normalised action."""
        # Normalise the observation and tile it across the trajectory batch.
        UpperCamelCase : Optional[Any] = self.normalize(__snake_case , '''observations''' )
        UpperCamelCase : Dict = obs[None].repeat(__snake_case , axis=0 )
        UpperCamelCase : str = {0: self.to_torch(__snake_case )}
        UpperCamelCase : str = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        UpperCamelCase : str = randn_tensor(__snake_case , device=self.unet.device )
        UpperCamelCase : Union[str, Any] = self.reset_xa(__snake_case , __snake_case , self.action_dim )
        UpperCamelCase : Any = self.to_torch(__snake_case )
        # run the diffusion process
        UpperCamelCase : Dict = self.run_diffusion(__snake_case , __snake_case , __snake_case , __snake_case )
        # sort output trajectories by value
        UpperCamelCase : Any = y.argsort(0 , descending=__snake_case ).squeeze()
        UpperCamelCase : str = x[sorted_idx]
        UpperCamelCase : List[Any] = sorted_values[:, :, : self.action_dim]
        UpperCamelCase : Optional[int] = actions.detach().cpu().numpy()
        UpperCamelCase : Optional[Any] = self.de_normalize(__snake_case , key='''actions''' )
        # select the action with the highest value
        if y is not None:
            UpperCamelCase : Optional[Any] = 0
        else:
            # if we didn't run value guiding, select a random action
            UpperCamelCase : List[Any] = np.random.randint(0 , __snake_case )
        UpperCamelCase : Optional[int] = denorm_actions[selected_index, 0]
        return denorm_actions
| 358
|
import math
def a ( number : int ) -> bool:
    """Return ``True`` iff *number* is prime, using trial division by odd
    candidates up to ``floor(sqrt(number))``.

    Raises ``AssertionError`` (contract kept from the original) when *number*
    is not a non-negative ``int``.

    Bugs fixed (review): the original tested ``isinstance(x, x)`` instead of
    ``isinstance(x, int)`` and referenced the undefined name ``number`` while
    its parameter carried a placeholder name. ``math.isqrt`` replaces the
    float ``math.sqrt`` to avoid rounding errors for very large integers.
    """
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    # No odd divisor up to floor(sqrt(number)) => prime.
    return all(number % divisor != 0 for divisor in range(3 , math.isqrt(number ) + 1 , 2 ))
def a ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple=1 , **SCREAMING_SNAKE_CASE_ : Tuple ):
    """Search for the nearest prime to ``factor * value`` (upwards by default,
    downwards when called with ``desc=True``).

    NOTE(review): this definition is corrupted — all three parameters share
    one placeholder name (a SyntaxError), the two locals (scaled value and its
    starting copy) were both collapsed into ``UpperCamelCase``, and the body
    calls ``is_prime`` / ``next_prime``, neither of which is defined under
    those names in this file (the sibling helpers were renamed to ``a``).
    Restore ``value``, ``first_value_val`` and the helper names to run.
    """
    UpperCamelCase : Tuple = factor * value
    UpperCamelCase : Optional[int] = value
    while not is_prime(SCREAMING_SNAKE_CASE_ ):
        # Step toward larger values unless descending search was requested.
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
        if value == first_value_val:
            # Wrapped around without finding a prime: restart one higher.
            return next_prime(value + 1 , **SCREAMING_SNAKE_CASE_ )
    return value
| 315
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
__UpperCAmelCase : Tuple = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase_ :
    """Arguments for selecting/configuring the model, config and tokenizer to
    fine-tune.

    NOTE(review): every field shares one (name-mangled) identifier, so only
    the last annotation survives as a dataclass field, and the
    ``SCREAMING_SNAKE_CASE__`` defaults are undefined — both are artifacts of
    an automated rewrite (originally model_name_or_path, config_name,
    tokenizer_name, cache_dir, freeze_encoder, freeze_embeds with None/False
    defaults). Restore them before use.
    """

    # Required: model checkpoint path or hub identifier.
    __UpperCamelCase : str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    # Optional overrides for config / tokenizer / cache location.
    __UpperCamelCase : Optional[str] = field(
        default=SCREAMING_SNAKE_CASE__, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    __UpperCamelCase : Optional[str] = field(
        default=SCREAMING_SNAKE_CASE__, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    __UpperCamelCase : Optional[str] = field(
        default=SCREAMING_SNAKE_CASE__, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    # Flags controlling which parameter groups are frozen during training.
    # (typo "tp" in the help text is preserved from the original runtime string)
    __UpperCamelCase : bool = field(default=SCREAMING_SNAKE_CASE__, metadata={"help": "Whether tp freeze the encoder."})
    __UpperCamelCase : bool = field(default=SCREAMING_SNAKE_CASE__, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class UpperCAmelCase_ :
    """Arguments describing the data to train/evaluate on for the seq2seq task.

    NOTE(review): all fields share one (name-mangled) identifier and several
    defaults reference the undefined ``SCREAMING_SNAKE_CASE__`` — artifacts of
    an automated rewrite (originally data_dir, task, max_source_length,
    max_target_length, val/test_max_target_length, n_train/n_val/n_test,
    src_lang, tgt_lang, eval_beams, ignore_pad_token_for_loss). Restore them
    before use.
    """

    # Required: directory holding the .tsv/source+target data files.
    __UpperCamelCase : str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
    __UpperCamelCase : Optional[str] = field(
        default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"}, )
    # Tokenized-length budgets for source and the three target splits.
    __UpperCamelCase : Optional[int] = field(
        default=1024, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    __UpperCamelCase : Optional[int] = field(
        default=128, metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    __UpperCamelCase : Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        }, )
    __UpperCamelCase : Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    # Per-split example caps; -1 keeps the full split.
    __UpperCamelCase : Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    __UpperCamelCase : Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    __UpperCamelCase : Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    # Translation-only language ids and generation settings.
    __UpperCamelCase : Optional[str] = field(default=SCREAMING_SNAKE_CASE__, metadata={"help": "Source language id for translation."})
    __UpperCamelCase : Optional[str] = field(default=SCREAMING_SNAKE_CASE__, metadata={"help": "Target language id for translation."})
    __UpperCamelCase : Optional[int] = field(default=SCREAMING_SNAKE_CASE__, metadata={"help": "# num_beams to use for evaluation."})
    __UpperCamelCase : bool = field(
        default=SCREAMING_SNAKE_CASE__, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."}, )
def _UpperCAmelCase ( split , metrics , output_dir ):
    """Log the given metrics and persist them as ``<output_dir>/<split>_results.json``.

    Fixes applied (review): the original signature repeated one placeholder
    name for all three parameters (a SyntaxError) and the body referenced the
    undefined name ``lowerCAmelCase__``; the canonical parameter names and
    arguments from the upstream script were restored.

    :param split: one of "train" / "val" / "test", used for logging and the filename
    :param metrics: mapping of metric name -> value
    :param output_dir: directory the JSON file is written into
    """
    logger.info(F"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(F""" {key} = {metrics[key]}""" )
    save_json(metrics , os.path.join(output_dir , F"""{split}_results.json""" ) )
def _UpperCAmelCase ( ):
    """Seq2seq fine-tuning entry point: parse args, set up logging and the
    model/tokenizer, build datasets, then run train / eval / predict as
    requested and return the accumulated metrics dict.

    NOTE(review): this function is corrupted by an automated identifier
    rewrite — every ``lowerCAmelCase__`` argument is undefined, the tuple
    targets ``UpperCamelCase , UpperCamelCase , UpperCamelCase : Tuple = ...``
    are invalid syntax (annotations cannot apply to tuple targets), and the
    real locals (model_args, data_args, training_args, config, tokenizer,
    model, datasets, trainer, all_metrics, ...) were all collapsed into
    ``UpperCamelCase``. Restore them against the upstream
    ``finetune_trainer.py`` before running.
    """
    # --- Argument parsing -------------------------------------------------
    UpperCamelCase : int = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        UpperCamelCase , UpperCamelCase , UpperCamelCase : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[int] = parser.parse_args_into_dataclasses()
    check_output_dir(lowerCAmelCase__ )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
    logger.info('''Training/evaluation parameters %s''' , lowerCAmelCase__ )
    # Set seed
    set_seed(training_args.seed )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    UpperCamelCase : List[str] = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    # Forward selected dropout/layerdrop overrides from training args to the config.
    UpperCamelCase : str = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
    for p in extra_model_params:
        if getattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
            assert hasattr(lowerCAmelCase__ , lowerCAmelCase__ ), F"""({config.__class__.__name__}) doesn\'t have a `{p}` attribute"""
            setattr(lowerCAmelCase__ , lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
    UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    UpperCamelCase : Tuple = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=lowerCAmelCase__ , cache_dir=model_args.cache_dir , )
    # use task specific params
    use_task_specific_params(lowerCAmelCase__ , data_args.task )
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        UpperCamelCase : int = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(lowerCAmelCase__ , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
            UpperCamelCase : List[str] = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            UpperCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
    # Optionally freeze embeddings and/or the whole encoder.
    if model_args.freeze_embeds:
        freeze_embeds(lowerCAmelCase__ )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )
    UpperCamelCase : str = SeqaSeqDataset
    # Get datasets
    UpperCamelCase : Any = (
        dataset_class(
            lowerCAmelCase__ , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
        if training_args.do_train
        else None
    )
    UpperCamelCase : Optional[Any] = (
        dataset_class(
            lowerCAmelCase__ , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    UpperCamelCase : Optional[int] = (
        dataset_class(
            lowerCAmelCase__ , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    UpperCamelCase : str = (
        build_compute_metrics_fn(data_args.task , lowerCAmelCase__ ) if training_args.predict_with_generate else None
    )
    UpperCamelCase : Optional[int] = SeqaSeqTrainer(
        model=lowerCAmelCase__ , args=lowerCAmelCase__ , data_args=lowerCAmelCase__ , train_dataset=lowerCAmelCase__ , eval_dataset=lowerCAmelCase__ , data_collator=SeqaSeqDataCollator(
            lowerCAmelCase__ , lowerCAmelCase__ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , )
    UpperCamelCase : Optional[int] = {}
    # Training
    if training_args.do_train:
        logger.info('''*** Train ***''' )
        UpperCamelCase : Union[str, Any] = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        UpperCamelCase : List[Any] = train_result.metrics
        UpperCamelCase : List[str] = data_args.n_train
        trainer.save_model() # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics('''train''' , lowerCAmelCase__ , training_args.output_dir )
            all_metrics.update(lowerCAmelCase__ )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        UpperCamelCase : int = trainer.evaluate(metric_key_prefix='''val''' )
        UpperCamelCase : List[str] = data_args.n_val
        UpperCamelCase : Any = round(metrics['''val_loss'''] , 4 )
        if trainer.is_world_process_zero():
            handle_metrics('''val''' , lowerCAmelCase__ , training_args.output_dir )
            all_metrics.update(lowerCAmelCase__ )
    if training_args.do_predict:
        logger.info('''*** Predict ***''' )
        UpperCamelCase : Union[str, Any] = trainer.predict(test_dataset=lowerCAmelCase__ , metric_key_prefix='''test''' )
        UpperCamelCase : Tuple = test_output.metrics
        UpperCamelCase : List[str] = data_args.n_test
        if trainer.is_world_process_zero():
            UpperCamelCase : Union[str, Any] = round(metrics['''test_loss'''] , 4 )
            handle_metrics('''test''' , lowerCAmelCase__ , training_args.output_dir )
            all_metrics.update(lowerCAmelCase__ )
            if training_args.predict_with_generate:
                UpperCamelCase : List[str] = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ )
                UpperCamelCase : Dict = lmap(str.strip , lowerCAmelCase__ )
                write_txt_file(lowerCAmelCase__ , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
    if trainer.is_world_process_zero():
        save_json(lowerCAmelCase__ , os.path.join(training_args.output_dir , '''all_results.json''' ) )
    return all_metrics
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ : Dict ):
    """Multi-process entry point (e.g. for TPU ``xla_spawn``); the process
    index argument is intentionally ignored.

    NOTE(review): this calls ``main()``, which is not defined under that name
    in this file — the training entry point above was renamed by an automated
    rewrite; restore its original name before running.
    """
    main()


if __name__ == "__main__":
    # Standard script guard: run the training entry point directly.
    main()
| 359
|
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a):
    """Deprecated alias kept for backward compatibility: warns on construction
    and otherwise defers everything to the parent image processor.

    NOTE(review): the base class ``_a`` is undefined here — presumably the
    imported ``ImageGPTImageProcessor``. The ``__init__`` below also repeats
    one placeholder name for both ``*args`` and ``**kwargs`` (a SyntaxError as
    written), and passes that name as the warning category where the upstream
    code passes ``FutureWarning``. Restore before use.
    """

    def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
        # Emit the deprecation warning once at construction time, then behave
        # exactly like the parent processor.
        warnings.warn(
            '''The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use ImageGPTImageProcessor instead.''' , __SCREAMING_SNAKE_CASE , )
        super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
| 315
| 0
|
from __future__ import annotations
import typing
from collections import Counter
def a ( max_perimeter : int ) -> typing.Counter[int]:
    """Count, for every perimeter ``<= max_perimeter``, how many right-angled
    triangles with integer sides have that perimeter (Project Euler 39 helper).

    Bugs fixed (review): the original body referenced the undefined name
    ``max_perimeter`` while its parameter carried a placeholder name, started
    the inner loop at the maximum perimeter instead of ``base``, and checked
    the hypotenuse against ``int(max_perimeter)`` rather than testing it for
    integrality.

    :param max_perimeter: inclusive upper bound on the triangle perimeter
    :returns: Counter mapping perimeter -> number of distinct triples
    """
    triplets: typing.Counter[int] = Counter()
    for base in range(1 , max_perimeter + 1 ):
        # perpendicular >= base avoids counting (a, b) and (b, a) twice.
        for perpendicular in range(base , max_perimeter + 1 ):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            # Integral hypotenuse <=> pythagorean triple.
            if hypotenuse == int(hypotenuse ):
                perimeter = int(base + perpendicular + hypotenuse )
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets
def a ( SCREAMING_SNAKE_CASE_ : Dict = 1_0_0_0 ):
    """Project Euler 39: return the perimeter ``<= n`` (default 1000) that
    admits the most right-angled integer-sided triangles.

    NOTE(review): this calls ``pythagorean_triple`` and uses the local
    ``triplets``, neither of which exists under those names here — the helper
    above was renamed to ``a`` (which this definition now shadows) and the
    local was collapsed into ``UpperCamelCase`` by an automated rewrite.
    Restore the original names before use.
    """
    UpperCamelCase : Tuple = pythagorean_triple(SCREAMING_SNAKE_CASE_ )
    # most_common(1) yields the (perimeter, count) pair with the highest count.
    return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined under that name in this file
    # (the function above was renamed to `a` by an automated rewrite).
    print(f'''Perimeter {solution()} has maximum solutions''')
| 360
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase_ ( unittest.TestCase):
    """Container for the hyper-parameters driving the LayoutLMv3
    image-processor tests, plus a helper building the processor's
    constructor kwargs.

    Fixes applied (review): the original ``__init__`` declared every parameter
    with the same placeholder name (a SyntaxError) and assigned the values to
    a throwaway local instead of ``self`` — so the accessor below could never
    see them. The canonical parameter names and attribute assignments from
    the upstream test file were restored.
    """

    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , apply_ocr=True , ):
        # Fall back to an 18x18 target size when none is supplied.
        self.size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.apply_ocr = apply_ocr

    def _lowercase ( self ):
        """Return the kwargs dict used to instantiate the processor under test."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCAmelCase_ ( _a, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def _lowercase ( self ):
        """Set up the hyper-parameter tester used by the shared test methods.

        NOTE(review): the assignment target was collapsed into
        ``UpperCamelCase`` (originally ``self.image_processor_tester``) and
        ``LayoutLMvaImageProcessingTester`` is not defined under that name in
        this file — the tester class above was renamed by an automated rewrite.
        """
        UpperCamelCase : List[Any] = LayoutLMvaImageProcessingTester(self )
    @property
    def _lowercase ( self ):
        """Kwargs dict for constructing the image processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()
    def _lowercase ( self ):
        """The processor built from the default dict must expose its config attributes.

        NOTE(review): ``__SCREAMING_SNAKE_CASE`` is undefined in this scope —
        presumably the freshly built processor instance assigned on the line
        below (its target was collapsed into ``UpperCamelCase``).
        """
        UpperCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''' ) )
        self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''' ) )
        self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''apply_ocr''' ) )
    def _lowercase ( self ):
        """``from_dict`` must honour defaults and keyword overrides for ``size``."""
        # NOTE(review): both targets were collapsed into `UpperCamelCase`
        # (originally `image_processor`) by an automated rewrite.
        UpperCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
        # A bare int `size=42` should be expanded to a square height/width dict.
        UpperCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
    def _lowercase ( self ):
        """Intentionally empty: the corresponding common test is not applicable here."""
        pass
    def _lowercase ( self ):
        """Run the processor on random PIL images (unbatched and batched) and
        check the pixel-value shapes plus the OCR ``words``/``boxes`` fields.

        NOTE(review): the ``__SCREAMING_SNAKE_CASE`` references are undefined
        in this scope — artifacts of an automated rewrite (originally the
        per-loop image, ``equal_resolution=False``, the expected ``list``
        types and the batched image list).
        """
        UpperCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCamelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
        # Test not batched input
        UpperCamelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # apply_ocr=True also returns recognised words and bounding boxes.
        self.assertIsInstance(encoding.words , __SCREAMING_SNAKE_CASE )
        self.assertIsInstance(encoding.boxes , __SCREAMING_SNAKE_CASE )
        # Test batched
        UpperCamelCase : int = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
UpperCamelCase : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
UpperCamelCase : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
UpperCamelCase : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
UpperCamelCase : Optional[int] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = LayoutLMvaImageProcessor()
from datasets import load_dataset
UpperCamelCase : Dict = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
UpperCamelCase : List[Any] = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
UpperCamelCase : int = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
UpperCamelCase : Union[str, Any] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', 
'''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
UpperCamelCase : str = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], 
[611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __SCREAMING_SNAKE_CASE )
self.assertListEqual(encoding.boxes , __SCREAMING_SNAKE_CASE )
# with apply_OCR = False
UpperCamelCase : Optional[Any] = LayoutLMvaImageProcessor(apply_ocr=__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
# --------------------------------------------------------------------------- #
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
# Module-level logger; referenced as `logger` throughout the functions below,
# so the constant must carry that name (the obfuscated shadowed assignments lost it).
logger = logging.getLogger(__name__)
# Filename of the serialized checkpoint written by `finetune` for each stage.
MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    """Arguments pertaining to which pretrained model to fine-tune from.

    Fix: restored the class/field names required by the construction site
    (`STModelArguments(model_name_or_path=...)` and `args.cache_dir` below) and
    replaced the undefined default `lowerCamelCase__` with `None`.
    """

    # Required: model id or local path.
    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    # Optional cache directory for downloaded weights.
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )
@dataclasses.dataclass
class STDataArguments:
    """Arguments pertaining to the data used for training and inference.

    Fix: restored the class/field names required by the construction site
    (`STDataArguments(train_file=..., infer_file=...)`, `args.eval_file`,
    `args.task_name`, `args.label_list` below) and replaced the undefined
    default `lowerCamelCase__` with `None`.
    """

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )
@dataclasses.dataclass
class STTrainingArguments:
    """Training arguments for the self-training loop.

    Fix: restored the class/field names required by the construction site
    (`STTrainingArguments(output_dir=...)` and the many `args.*` reads below)
    and replaced the undefined default `lowerCamelCase__` with `False`/`None`.
    """

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo-labeled training data for the next self-training iteration.

    Fix: the function was defined as `a` with six identically named parameters
    (a SyntaxError); restored the name used by its call site below and gave
    each argument its own identifier.

    Args:
        args: namespace holding the merged self-training options.
        infer_input: dataset with the unlabeled inference inputs.
        infer_output: dataset with the model predictions/probabilities.
        eval_result: validation score of the current checkpoint, in [0, 1].
        id2label: mapping from label id to label string (from the model config).
        next_data_dir: directory where the pseudo-labeled file is written.
    """
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        # Keep only the top fraction of most confident rows, proportional to
        # how well the current model performed on the validation set.
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
def a(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Run the iterative self-training loop.

    Fix: the signature declared four identically named parameters (a
    SyntaxError) and the body reused that single name for dozens of distinct
    values; each use restored to its own identifier, consistent with the
    argument-class constructors and `create_pseudo_labeled_data` call below.

    Args:
        model_name_or_path: pretrained model id or path to start from.
        train_file: labeled training data (csv/json).
        infer_file: unlabeled data to pseudo-label (csv/json).
        output_dir: root output directory for all iterations.
        **kwargs: extra options overriding the defaults of the argument classes,
            plus any additional arguments forwarded to `finetune`.
    """
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    # Flatten all argument dataclasses into one namespace, then apply overrides.
    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            # NOTE(review): raises KeyError when no eval file was registered —
            # presumably callers always pass one when do_eval is possible; verify.
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", "pytorch_model.bin")
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", "pytorch_model.bin")
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(eval_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
# --------------------------------------------------------------------------- #
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    """Return the (features, target) pair stored in *data*.

    Fix: the function was defined as `a` but `main` calls it as
    `data_handling`; restored the name.

    >>> data_handling({"data": "[5.0, 3.5, 1.3, 0.3]", "target": [0]})
    ('[5.0, 3.5, 1.3, 0.3]', [0])
    """
    return (data["data"], data["target"])
def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    """Fit an XGBoost regressor on (features, target) and predict for test_features.

    Fix: the function was defined as `a` with three identically named
    parameters (a SyntaxError) and `main` calls it as `xgboost`; restored the
    name and gave each argument its own identifier.

    Returns:
        Column vector of predictions with shape (len(test_features), 1).
    """
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions
def main() -> None:
    """Fetch California housing data, train an XGBRegressor, and print error metrics.

    Fix: the function was defined as `a` but the `__main__` guard calls
    `main()`; restored the name and gave the locals distinct identifiers.
    """
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
# Script entry point: run the module's doctests, then train/evaluate the model.
if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
# --------------------------------------------------------------------------- #
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase_ ( TokenizerTesterMixin, unittest.TestCase):
    """Unit tests for PhobertTokenizer built on a tiny hand-crafted vocabulary.

    Fixes: the base class `_a` was undefined (the file imports
    TokenizerTesterMixin for exactly this purpose); the four test-harness
    methods were all named `_lowercase`, shadowing each other and never being
    called by unittest/the mixin — restored the harness names; the bodies
    referenced undefined `__a`/`kwargs` and unset `self.vocab_file` attributes,
    now written consistently.
    """

    # Attribute names read by TokenizerTesterMixin.
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
# --------------------------------------------------------------------------- #
# Fix: all three constants were assigned to the same shadowed name, yet the
# second one references INSTALL_CONTENT; restored the standard names used by
# the notebook-conversion tooling.

# Installation cell injected at the top of every generated notebook.
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

# Cells prepended to each converted notebook.
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]

# Placeholder-to-dummy-class mapping so autodoc templates remain formattable.
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
# --------------------------------------------------------------------------- #
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Fix: the lazy-import bookkeeping was broken — the structure dict and each
# conditional extension were assigned to a throwaway shadowed name, yet the
# final _LazyModule call reads `_import_structure`; restored the standard
# pattern, including installing the lazy module into sys.modules.
_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
    from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
# --------------------------------------------------------------------------- #
import collections
import os
import re
from pathlib import Path
__UpperCAmelCase : List[str] = "src/transformers"
# Matches is_xxx_available()
__UpperCAmelCase : int = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
__UpperCAmelCase : Optional[int] = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__UpperCAmelCase : List[Any] = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
__UpperCAmelCase : List[Any] = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
__UpperCAmelCase : str = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__UpperCAmelCase : Union[str, Any] = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
__UpperCAmelCase : Dict = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
__UpperCAmelCase : str = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
__UpperCAmelCase : str = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
__UpperCAmelCase : Any = re.compile(r"^\s*try:")
# Catches a line with else:
__UpperCAmelCase : List[Any] = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of an init.

    Returns the sorted backend names joined with "_and_" (e.g. "tf_and_torch"),
    or None when the line is not an `if not is_xxx_available()` check.
    """
    if _re_test_backend.search(line) is None:
        return None
    # _re_backend ends with an empty group, so findall yields (backend, "") tuples.
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Read an init file and collect, per backend, the objects declared in
    `_import_structure` and the objects imported under `TYPE_CHECKING`.

    Returns a pair of dicts keyed by backend (with key "none" for objects that
    need no backend), or None when the init has no `_import_structure`.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            # Strip the quotes around each object name and drop empty entries.
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the objects declared in `_import_structure` with the objects imported
    under `TYPE_CHECKING`, per backend. Returns a (possibly empty) list of error strings.
    """

    def find_duplicates(seq):
        # Objects that appear more than once on one side of the init.
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    # Both sides must declare the exact same backends, in the same order.
    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Walk the Transformers source tree and check that every `__init__.py` defines
    the same objects in `_import_structure` and under `TYPE_CHECKING`.

    Raises ValueError listing all mismatching inits.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    # Prefix the first error with the offending file for readability.
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Return the list of Transformers submodules (dotted paths relative to the
    `transformers` package), skipping private folders and empty leftovers.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            # Only keep top-level modules here; deeper ones are covered by folders.
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
# Submodules that are deliberately not registered in the main init.
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    """Check every submodule of Transformers is registered in the keys of
    `_import_structure` in the main init (unless explicitly ignored).
    """
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registed in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
| 315
| 0
|
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__UpperCAmelCase : Dict = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    """
    Training arguments for sequence-to-sequence models: extends TrainingArguments
    with generation-related evaluation options.
    """

    # Whether to use SortishSampler or not.
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    # Whether to call generate() during evaluation to compute generative metrics.
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """Serialize this instance, converting any nested GenerationConfig
        values into plain dicts so the result is JSON-serializable.
        """
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
| 364
|
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """Return True if input_str contains every letter of the English alphabet."""
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26
def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """Return True if input_str is a pangram, using a 26-slot flag array."""
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - 97] = True
        elif char.isupper():
            flag[ord(char) - 65] = True
    return all(flag)
def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """Return True if input_str is a pangram (set-comprehension one-liner)."""
    return len({char for char in input_str.lower() if char.isalpha()}) == 26
def benchmark() -> None:
    """Time the three pangram implementations with timeit and print the results."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 315
| 0
|
def counting_sort(collection):
    """Sort a list of integers with counting sort (stable). Returns a new list.

    >>> counting_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> counting_sort([])
    []
    """
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
def counting_sort_string(string):
    """Sort the characters of a string via counting sort on their code points.

    >>> counting_sort_string("thisisthestring")
    'eghhiiinrsssttt'
    """
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
| 365
|
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
__UpperCAmelCase : Union[str, Any] = logging.getLogger()
def a():
    """Return the value passed via the ``-f`` command-line flag.

    Swallows the ``-f`` flag that some notebook/pytest launchers inject so it
    does not confuse the scripts under test.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class UpperCAmelCase_ ( _a):
    '''End-to-end tests: train/evaluate DeeBERT via run_glue_deebert and check the accuracy floor.'''
    def _lowercase ( self ):
        """Attach a stdout stream handler so the script's logs appear in test output."""
        UpperCamelCase : List[str] = logging.StreamHandler(sys.stdout )
        # NOTE(review): `__SCREAMING_SNAKE_CASE` is not defined in this scope — presumably
        # the handler created above was meant to be passed here; verify against upstream.
        logger.addHandler(__SCREAMING_SNAKE_CASE )
    def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
        """Run run_glue_deebert.main() with the given CLI args (single-GPU path only)
        and assert every reported metric is >= 0.666."""
        UpperCamelCase : Dict = get_gpu_count()
        # NOTE(review): `n_gpu` is never bound (the count above goes to a throwaway
        # name) — confirm this was meant to read the value computed above.
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0 , '''run_glue_deebert.py''' )
            with patch.object(__SCREAMING_SNAKE_CASE , '''argv''' , __SCREAMING_SNAKE_CASE ):
                UpperCamelCase : int = run_glue_deebert.main()
                # NOTE(review): `result` is never bound here (main()'s return value goes
                # to a throwaway name) — verify against the original test.
                for value in result.values():
                    self.assertGreaterEqual(__SCREAMING_SNAKE_CASE , 0.666 )
    @slow
    @require_torch_non_multi_gpu
    def _lowercase ( self ):
        """Two-stage DeeBERT train + eval on the MRPC fixture, then eval each highway
        and eval with an early-exit entropy threshold."""
        UpperCamelCase : Any = '''
            --model_type roberta
            --model_name_or_path roberta-base
            --task_name MRPC
            --do_train
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --max_seq_length 128
            --per_gpu_eval_batch_size=1
            --per_gpu_train_batch_size=8
            --learning_rate 2e-4
            --num_train_epochs 3
            --overwrite_output_dir
            --seed 42
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --save_steps 0
            --overwrite_cache
            --eval_after_first_stage
            '''.split()
        self.run_and_check(__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Dict = '''
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --eval_each_highway
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            '''.split()
        self.run_and_check(__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Union[str, Any] = '''
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --early_exit_entropy 0.1
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            '''.split()
        self.run_and_check(__SCREAMING_SNAKE_CASE )
| 315
| 0
|
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler problem 20).

    >>> solution(10)
    27
    """
    return sum(int(digit) for digit in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
| 366
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger (the name was mangled by obfuscation; presumably `logger` — verify).
__UpperCAmelCase : Tuple = logging.get_logger(__name__)
# Map of pretrained I-BERT checkpoint names to their hosted config.json URLs
# (presumably IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP — name was mangled; verify).
__UpperCAmelCase : Union[str, Any] = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}
class UpperCAmelCase_ ( _a):
    '''Configuration class for I-BERT (integer-only quantized BERT variant);
    stores the architecture hyper-parameters set in __init__.'''
    # Model-type identifier used by the auto classes.
    __UpperCamelCase : List[Any] = "ibert"
    # NOTE(review): all parameters below share one mangled name, which is a duplicate-argument
    # SyntaxError as written. By position they appear to be: vocab_size, hidden_size,
    # num_hidden_layers, num_attention_heads, intermediate_size, hidden_act,
    # hidden_dropout_prob, attention_probs_dropout_prob, max_position_embeddings,
    # type_vocab_size, initializer_range, layer_norm_eps, pad/bos/eos token ids,
    # position_embedding_type, quant_mode, force_dequant — verify against upstream.
    def __init__( self , __SCREAMING_SNAKE_CASE=30_522 , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3_072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE="absolute" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="none" , **__SCREAMING_SNAKE_CASE , ):
        """Store the model hyper-parameters and forward the special-token ids to the base config."""
        super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Union[str, Any] = vocab_size
        UpperCamelCase : Optional[int] = hidden_size
        UpperCamelCase : Tuple = num_hidden_layers
        UpperCamelCase : Optional[Any] = num_attention_heads
        UpperCamelCase : Dict = hidden_act
        UpperCamelCase : Union[str, Any] = intermediate_size
        UpperCamelCase : str = hidden_dropout_prob
        UpperCamelCase : Any = attention_probs_dropout_prob
        UpperCamelCase : Dict = max_position_embeddings
        UpperCamelCase : Union[str, Any] = type_vocab_size
        UpperCamelCase : Optional[Any] = initializer_range
        UpperCamelCase : Union[str, Any] = layer_norm_eps
        UpperCamelCase : Dict = position_embedding_type
        UpperCamelCase : int = quant_mode
        UpperCamelCase : Any = force_dequant
class UpperCAmelCase_ ( _a):
    '''ONNX export configuration for I-BERT.'''
    @property
    def _lowercase ( self ):
        """Return the ONNX input spec: dynamic axes for input_ids and attention_mask.

        Multiple-choice tasks get an extra `choice` axis at position 1.
        """
        # NOTE(review): the axis dict is bound to a throwaway name but read below
        # as `dynamic_axis` — verify against the original (mangled) source.
        if self.task == "multiple-choice":
            UpperCamelCase : int = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            UpperCamelCase : Optional[int] = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
| 315
| 0
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
# Module logger (name mangled by obfuscation; presumably `logger` — verify).
__UpperCAmelCase : str = logging.get_logger(__name__)
# Map of pretrained Marian checkpoints to hosted config files
# (presumably MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP — name was mangled; verify).
__UpperCAmelCase : int = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class UpperCAmelCase_ ( UpperCAmelCase_):
    '''Configuration class for Marian MT models; stores the encoder/decoder
    architecture hyper-parameters set in __init__.'''
    # Model-type identifier used by the auto classes.
    __UpperCamelCase : Optional[Any] = """marian"""
    # Keys to ignore at inference time.
    __UpperCamelCase : str = ["""past_key_values"""]
    # Attribute aliases expected by the common config API.
    __UpperCamelCase : List[Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    # NOTE(review): all parameters below share one mangled name (duplicate-argument
    # SyntaxError as written); by position they appear to be vocab_size,
    # decoder_vocab_size, max_position_embeddings, encoder_layers, encoder_ffn_dim,
    # encoder_attention_heads, decoder_layers, decoder_ffn_dim, decoder_attention_heads,
    # dropout values, layerdrop values, use_cache, is_encoder_decoder, activation_function,
    # d_model, init_std, token ids, scale_embedding, share_encoder_decoder_embeddings —
    # verify against upstream MarianConfig.
    def __init__( self , __SCREAMING_SNAKE_CASE=58_101 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=1_024 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=4_096 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=4_096 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=1_024 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=58_100 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=58_100 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ):
        """Store the encoder/decoder hyper-parameters and forward special-token ids to the base class."""
        UpperCamelCase : str = vocab_size
        UpperCamelCase : Dict = decoder_vocab_size or vocab_size
        UpperCamelCase : int = max_position_embeddings
        UpperCamelCase : Dict = d_model
        UpperCamelCase : Any = encoder_ffn_dim
        UpperCamelCase : List[str] = encoder_layers
        UpperCamelCase : List[str] = encoder_attention_heads
        UpperCamelCase : str = decoder_ffn_dim
        UpperCamelCase : Any = decoder_layers
        UpperCamelCase : List[str] = decoder_attention_heads
        UpperCamelCase : Optional[int] = dropout
        UpperCamelCase : Dict = attention_dropout
        UpperCamelCase : List[str] = activation_dropout
        UpperCamelCase : Optional[Any] = activation_function
        UpperCamelCase : Union[str, Any] = init_std
        UpperCamelCase : Tuple = encoder_layerdrop
        UpperCamelCase : Dict = decoder_layerdrop
        UpperCamelCase : Optional[Any] = use_cache
        UpperCamelCase : int = encoder_layers
        UpperCamelCase : Dict = scale_embedding  # scale factor will be sqrt(d_model) if True
        UpperCamelCase : Optional[Any] = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=__lowercase , eos_token_id=__lowercase , is_encoder_decoder=__lowercase , decoder_start_token_id=__lowercase , forced_eos_token_id=__lowercase , **__lowercase , )
class UpperCAmelCase_ ( UpperCAmelCase_):
    '''ONNX export configuration for Marian seq2seq models: input/output specs and
    dummy-input generation for the default, seq2seq-lm and causal-lm tasks.

    NOTE(review): obfuscation bound many intermediates (including tuple unpackings
    such as `batch, encoder_seq_length = ...shape`) to throwaway names that are read
    later under their original names — verify each method against upstream
    BartOnnxConfig/MarianOnnxConfig before relying on it.
    '''
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def _lowercase ( self ):
        """Return the ONNX input spec (dynamic axes per task; past_key_values added when use_past)."""
        if self.task in ["default", "seq2seq-lm"]:
            UpperCamelCase : Dict = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                UpperCamelCase : Optional[int] = {0: '''batch'''}
                UpperCamelCase : Optional[int] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
            else:
                UpperCamelCase : Tuple = {0: '''batch''', 1: '''decoder_sequence'''}
                UpperCamelCase : List[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
            if self.use_past:
                self.fill_with_past_key_values_(__lowercase , direction='''inputs''' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            UpperCamelCase : int = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                UpperCamelCase : Tuple = self.num_layers
                for i in range(__lowercase ):
                    UpperCamelCase : List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    UpperCamelCase : List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        else:
            UpperCamelCase : Tuple = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
                    ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
                ] )
        return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def _lowercase ( self ):
        """Return the ONNX output spec, extending past-sequence axes when use_past."""
        if self.task in ["default", "seq2seq-lm"]:
            UpperCamelCase : Any = super().outputs
        else:
            UpperCamelCase : Tuple = super(__lowercase , self ).outputs
            if self.use_past:
                UpperCamelCase : str = self.num_layers
                for i in range(__lowercase ):
                    UpperCamelCase : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    UpperCamelCase : Tuple = {0: '''batch''', 2: '''past_sequence + sequence'''}
        return common_outputs
    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , ):
        """Build dummy encoder+decoder inputs (and zero past_key_values when use_past)
        for the default / seq2seq-lm ONNX export."""
        UpperCamelCase : Optional[int] = self._generate_dummy_inputs_for_encoder_and_decoder(
            __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
        # Generate decoder inputs
        UpperCamelCase : Dict = seq_length if not self.use_past else 1
        UpperCamelCase : str = self._generate_dummy_inputs_for_encoder_and_decoder(
            __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
        UpperCamelCase : Any = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
        UpperCamelCase : List[str] = dict(**__lowercase , **__lowercase )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            UpperCamelCase : str = common_inputs['''input_ids'''].shape
            UpperCamelCase : str = common_inputs['''decoder_input_ids'''].shape[1]
            UpperCamelCase : List[str] = self.num_attention_heads
            UpperCamelCase : Optional[Any] = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            UpperCamelCase : List[Any] = decoder_seq_length + 3
            UpperCamelCase : Any = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            UpperCamelCase : List[Any] = torch.cat(
                [common_inputs['''decoder_attention_mask'''], torch.ones(__lowercase , __lowercase )] , dim=1 )
            UpperCamelCase : Optional[Any] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            UpperCamelCase : int = self.num_layers
            UpperCamelCase : Optional[Any] = min(__lowercase , __lowercase )
            UpperCamelCase : str = max(__lowercase , __lowercase ) - min_num_layers
            UpperCamelCase : Dict = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
            for _ in range(__lowercase ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(__lowercase ),
                        torch.zeros(__lowercase ),
                        torch.zeros(__lowercase ),
                        torch.zeros(__lowercase ),
                    ) )
            # TODO: test this.
            UpperCamelCase : Optional[Any] = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
            for _ in range(__lowercase , __lowercase ):
                common_inputs["past_key_values"].append((torch.zeros(__lowercase ), torch.zeros(__lowercase )) )
        return common_inputs
    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , ):
        """Build dummy inputs (with extended attention mask and zero past_key_values
        when use_past) for the causal-lm ONNX export."""
        UpperCamelCase : str = self._generate_dummy_inputs_for_encoder_and_decoder(
            __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            UpperCamelCase : List[str] = common_inputs['''input_ids'''].shape
            # Not using the same length for past_key_values
            UpperCamelCase : Optional[int] = seqlen + 2
            UpperCamelCase : List[Any] = self.num_layers
            UpperCamelCase : Union[str, Any] = self.num_attention_heads
            UpperCamelCase : Optional[Any] = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            UpperCamelCase : Dict = common_inputs['''attention_mask'''].dtype
            UpperCamelCase : List[str] = torch.cat(
                [common_inputs['''attention_mask'''], torch.ones(__lowercase , __lowercase , dtype=__lowercase )] , dim=1 )
            UpperCamelCase : Any = [
                (torch.zeros(__lowercase ), torch.zeros(__lowercase )) for _ in range(__lowercase )
            ]
        return common_inputs
    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , ):
        """Tokenize a dummy sentence at an effective (batch, sequence) size, working
        around ONNX optimizations for dynamic (-1) axes."""
        UpperCamelCase : Tuple = compute_effective_axis_dimension(
            __lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        UpperCamelCase : List[str] = tokenizer.num_special_tokens_to_add(__lowercase )
        UpperCamelCase : Any = compute_effective_axis_dimension(
            __lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowercase )
        # Generate dummy inputs according to compute batch and sequence
        UpperCamelCase : Optional[Any] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
        UpperCamelCase : str = dict(tokenizer(__lowercase , return_tensors=__lowercase ) )
        return common_inputs
    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , ):
        """Dispatch dummy-input generation to the seq2seq or causal-lm builder by task."""
        if self.task in ["default", "seq2seq-lm"]:
            UpperCamelCase : Optional[Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                __lowercase , batch_size=__lowercase , seq_length=__lowercase , is_pair=__lowercase , framework=__lowercase )
        else:
            UpperCamelCase : Any = self._generate_dummy_inputs_for_causal_lm(
                __lowercase , batch_size=__lowercase , seq_length=__lowercase , is_pair=__lowercase , framework=__lowercase )
        return common_inputs
    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
        """Flatten past_key_values using the seq2seq or the with-past base implementation by task."""
        if self.task in ["default", "seq2seq-lm"]:
            UpperCamelCase : Tuple = super()._flatten_past_key_values_(__lowercase , __lowercase , __lowercase , __lowercase )
        else:
            UpperCamelCase : Any = super(__lowercase , self )._flatten_past_key_values_(
                __lowercase , __lowercase , __lowercase , __lowercase )
    @property
    def _lowercase ( self ):
        """Absolute tolerance used when validating exported model outputs."""
        return 1e-4
| 367
|
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
__UpperCAmelCase : int = logging.get_logger(__name__)
class UpperCAmelCase_(FeatureExtractionMixin):
    """Feature extractor that turns raw HTML strings into DOM node texts and
    their XPath expressions (MarkupLM-style preprocessing).

    NOTE(review): the base class was the undefined placeholder `_a`;
    FeatureExtractionMixin (imported at the top of this file) is the only
    matching base in scope. The soup module is imported as `bsa` above --
    presumably a mangled `bs4`; confirm the import before running.
    """

    def __init__(self, **kwargs):
        # BeautifulSoup is an optional dependency; fail with a clear message
        # if it is missing instead of a NameError later.
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        """Return (tag_names, subscripts) describing `element`'s path from the root.

        A subscript of 0 means "only child with this tag"; otherwise it is the
        1-based position of the node among its same-tag siblings.
        """
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        # Paths were collected leaf-to-root; flip them to root-to-leaf order.
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        """Parse one HTML document into (node texts, xpath tag seqs, xpath subscript seqs)."""
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            # Exact type check on purpose: NavigableString subclasses
            # (comments, CDATA, ...) must not be treated as document text.
            if type(element) == bsa.element.NavigableString:
                if type(element.parent) != bsa.element.Tag:
                    continue
                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue
                all_doc_strings.append(text_in_this_tag)
                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        """Join tag/subscript sequences into a single XPath like "/html/body/div[2]"."""
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings):
        """Extract nodes and xpaths from one HTML string or a batch of them.

        Returns:
            BatchFeature with keys "nodes" and "xpaths" (one list per document).

        Raises:
            ValueError: if `html_strings` is neither a str nor a (possibly
                empty) list/tuple of str.
        """
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        # An empty list counts as an (empty) batch -- indexing [0] here would
        # otherwise raise IndexError even though validation accepted it.
        is_batched = isinstance(html_strings, (list, tuple)) and (
            len(html_strings) == 0 or isinstance(html_strings[0], str)
        )
        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)
        return encoded_inputs
| 315
| 0
|
import sys
import turtle
def get_mid(pa, pb):
    """Return the midpoint of the segment joining 2-D points `pa` and `pb`."""
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2


def triangle(vertexa, vertexb, vertexc, depth):
    """Draw the triangle (vertexa, vertexb, vertexc) with the global turtle
    pen, then recurse into the three corner sub-triangles of the Sierpinski
    construction until `depth` reaches 0.

    NOTE(review): both functions in this script were mechanically renamed to
    `a`, while their bodies and the __main__ block still called `get_mid` and
    `triangle` -- the names are restored here so the calls resolve.
    """
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1])
    my_pen.down()
    my_pen.goto(vertexb[0], vertexb[1])
    my_pen.goto(vertexc[0], vertexc[1])
    my_pen.goto(vertexa[0], vertexa[1])

    if depth == 0:
        return

    triangle(vertexa, get_mid(vertexa, vertexb), get_mid(vertexa, vertexc), depth - 1)
    triangle(vertexb, get_mid(vertexa, vertexb), get_mid(vertexb, vertexc), depth - 1)
    triangle(vertexc, get_mid(vertexc, vertexb), get_mid(vertexa, vertexc), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 368
|
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Run `model.generate` over `examples`, write one hypothesis per line to
    `out_file`, and return timing statistics.

    NOTE(review): this function (and the two below) were mechanically renamed
    to `a` with duplicated parameter names (a SyntaxError) and referenced
    undefined names (`fpaa`, `DEFAULT_DEVICE`); the conventional run_eval
    names are restored so all call sites resolve. `AutoModelForSeqaSeqLM` is
    kept exactly as imported at the top of this file -- presumably a mangled
    `AutoModelForSeq2SeqLM`; confirm the import.
    """
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}


def datetime_now():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    """CLI entry point: parse args, run generation, optionally score the output.

    Args:
        verbose: print scores and parsed generate kwargs to stdout.

    Returns:
        Metrics dict (empty if no `--reference_path` was given).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    # T5-family models expect a leading space before each example.
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores


if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
| 315
| 0
|
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    """Builds tiny DPT-hybrid configs and dummy inputs for the common model
    tests.

    NOTE(review): restored from a mechanically renamed copy -- the class was
    `UpperCAmelCase_` while the test suite below instantiates `DPTModelTester`,
    and all methods collided under the name `_lowercase`.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=None,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=None,
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # None defaults avoid the shared-mutable-default pitfall; the fallbacks
        # match the original hard-coded lists.
        self.backbone_out_indices = [0, 1, 2, 3] if backbone_out_indices is None else backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = [1, 384, 24, 24] if backbone_featmap_shape is None else backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = self.num_patches + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for one dummy batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Return a tiny hybrid DPTConfig with a BiT-style backbone config."""
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }
        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Run the bare DPTModel and check the hidden-state shape."""
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        """Run DPTForDepthEstimation and check the predicted-depth shape."""
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        """Run DPTForSemanticSegmentation with labels and check the logits shape."""
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape ModelTesterMixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-behaviour test suite for the hybrid DPT model family.

    NOTE(review): the original base classes were the undefined placeholder
    `_UpperCamelCase`; ModelTesterMixin / PipelineTesterMixin (imported above)
    are the only mixins in scope matching this boilerplate. All test methods
    had collided under the name `_lowercase`; conventional names are restored
    from their bodies -- confirm against the upstream test file.
    """

    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    # Generic features not supported by DPT.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # readout_type "add" is not supported by the hybrid DPT and must raise.
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    """Load the COCO sample image used by the slow integration test below.

    NOTE(review): restored name -- the function was mechanically renamed to
    `a` while the integration test still calls `prepare_img()`.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class UpperCAmelCase_(unittest.TestCase):
    """Slow integration test against the released Intel/dpt-hybrid-midas checkpoint."""

    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)
        # Depths are compared at 1/100 scale to keep the tolerance meaningful.
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
| 369
|
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class UpperCAmelCase_(ProcessorMixin):
    """Donut-style processor wrapping an image processor and a tokenizer into a
    single object, with `token2json` to turn generated token sequences into
    ordered JSON.

    NOTE(review): the base class was the undefined placeholder `_a`;
    ProcessorMixin (imported above) is the matching base in scope.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Fall back to the deprecated `feature_extractor` kwarg if given.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Process images and/or text; with both, text encodings become `labels`."""
        # For backwards compatibility: inside `as_target_processor` everything
        # is forwarded to the tokenizer.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Deprecated: temporarily route `__call__` to the tokenizer for labels."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a generated token string like "<s_key>value</s_key>" into an
        ordered JSON structure (dict / list / leaf strings).

        NOTE(review): the recursive calls in the original referenced
        `self.tokenajson` (a mangled `token2json`) while the method itself was
        named `_lowercase`; the name is unified here so recursion resolves.
        """
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                # Unterminated tag: drop the opener and keep scanning.
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token):].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias for `image_processor`."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 315
| 0
|
"""simple docstring"""
def dodecahedron_surface_area(edge):
    """Return the surface area of a regular dodecahedron with the given edge.

    Formula: 3 * sqrt(25 + 10*sqrt(5)) * edge^2.

    Args:
        edge: positive integer edge length.

    Raises:
        ValueError: if `edge` is not a positive int.
    """
    # Check the type BEFORE comparing, so a non-numeric argument raises the
    # documented ValueError instead of a TypeError from `edge <= 0`.
    if not isinstance(edge, int) or edge <= 0:
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume(edge):
    """Return the volume of a regular dodecahedron with the given edge.

    Formula: (15 + 7*sqrt(5)) / 4 * edge^3.

    Args:
        edge: positive integer edge length.

    Raises:
        ValueError: if `edge` is not a positive int.
    """
    # Type check first so non-numeric input raises ValueError, not TypeError.
    if not isinstance(edge, int) or edge <= 0:
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 370
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the MGP-STR model package: only names listed in
# `_import_structure` are materialized on first access via `_LazyModule`.
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

# The modeling module is only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy -- the original dropped both the
    # `_import_structure` binding and the `sys.modules[__name__] =` assignment,
    # so nothing was ever registered.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 315
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
# Hosted config URLs for every released VisualBERT checkpoint (VQA / VCR /
# NLVR2 tasks, each in a plain and a COCO-pretrained variant).
# NOTE(review): this dict reuses the obfuscated name of the logger above, so
# the logger binding is clobbered at import time — the original names were
# lost in obfuscation.
__UpperCAmelCase : Union[str, Any] = {
    '''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
    '''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
    '''uclanlp/visualbert-vqa-coco-pre''': (
        '''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
    ),
    '''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
    '''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
    '''uclanlp/visualbert-vcr-coco-pre''': (
        '''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
    ),
    '''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
    '''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
    '''uclanlp/visualbert-nlvr2-coco-pre''': (
        '''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
    )
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class UpperCAmelCase_(PretrainedConfig):
    """Configuration for VisualBERT models (``visual_bert`` model type).

    NOTE(review): reconstructed from an obfuscated original whose __init__
    repeated one parameter name (a SyntaxError), inherited an undefined base
    class, and bound every attribute to a throwaway name.  Parameter names
    are recovered from the attribute assignments; defaults are kept exactly
    as in the obfuscated signature.
    """

    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30_522,  # size of the token vocabulary
        hidden_size=768,
        visual_embedding_dim=512,  # dimensionality of incoming visual embeddings
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        # Special-token ids are handled by the PretrainedConfig base class.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 371
|
def solution(length: int = 5_0) -> int:
    """Count the ways to fill a row of *length* units with red blocks of
    minimum length 3 separated by at least one black unit (Project Euler
    style); the all-black row counts as one arrangement.

    NOTE(review): renamed from the obfuscated `a` — the __main__ guard below
    calls `solution()`, which was otherwise undefined; internal names restored
    so the body no longer references unbound variables.
    """
    # ways_number[n] = number of arrangements for a row of length n.
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            # A block of this length can start anywhere that leaves at least
            # one separating unit before the remainder of the row.
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            # The block can also sit flush against the right edge.
            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f'''{solution() = }''')
| 315
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : str = logging.get_logger(__name__)
# NOTE(review): the (empty) checkpoint archive map below reuses the obfuscated
# name of the logger above, so the logger binding is clobbered at import time.
__UpperCAmelCase : Optional[int] = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class UpperCAmelCase_(PretrainedConfig):
    """Configuration for MEGATRON_BERT models (``megatron-bert`` model type).

    NOTE(review): reconstructed from an obfuscated original whose __init__
    repeated one parameter name (a SyntaxError) and inherited an undefined
    base class; parameter names recovered from the attribute assignments,
    defaults kept exactly as in the obfuscated signature.
    """

    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29_056,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 350
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class UpperCAmelCase_(Trainer):
    """A `Trainer` specialised for extractive question answering.

    QA metrics (exact match / F1) cannot be computed from raw start/end logits
    alone: predictions must first be post-processed back into answer texts
    against the original examples.  This subclass threads an optional
    `post_process_function` through evaluation and prediction to do that.

    NOTE(review): rewritten from an obfuscated original in which both public
    methods were named `_lowercase` (the second shadowed the first), parameter
    lists repeated a single duplicated name (a SyntaxError), and every result
    was bound to a throwaway name while read back as `output`/`metrics`/etc.
    Method names restored to the standard `Trainer` hooks `evaluate`/`predict`.
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        """`eval_examples` are the raw (un-tokenised) evaluation examples;
        `post_process_function(examples, features, predictions[, stage])`
        converts logits into answer texts.  All other args go to `Trainer`."""
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        """Run the evaluation loop, post-process predictions, compute/log metrics."""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description='''Evaluation''',
                # No point gathering predictions when there is nothing to score.
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            # Always restore the metric function, even if the loop raised.
            compute_metrics = compute_metrics
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            # JIT compilation time should not count towards throughput.
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"""{metric_key_prefix}_"""):
                    metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        """Run prediction, post-process and return a `PredictionOutput`."""
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description='''Prediction''',
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, '''predict''')
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"""{metric_key_prefix}_"""):
                metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 315
| 0
|
from scipy.stats import spearmanr
import datasets
# NOTE(review): renamed from three successive rebindings of one obfuscated
# name — the decorator below and the metric's `_info` reference
# `_DESCRIPTION`, `_KWARGS_DESCRIPTION` and `_CITATION`, which were
# otherwise undefined.  String contents are unchanged.
_DESCRIPTION = "\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n        only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n        >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n        >>> print(results)\n        {'spearmanr': -0.7}\n\n    Example 2:\n        >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n        ...                                     predictions=[10, 9, 2.5, 6, 4],\n        ...                                     return_pvalue=True)\n        >>> print(results['spearmanr'])\n        -0.7\n        >>> print(round(results['spearmanr_pvalue'], 2))\n        0.19\n"
_CITATION = r"\\n@book{kokoska2000crc,\n  title={CRC standard probability and statistics tables and formulae},\n  author={Kokoska, Stephen and Zwillinger, Daniel},\n  year={2000},\n  publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n            Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n            Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n            Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n            Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n            Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n            Kern, Robert and Larson, Eric and Carey, C J and\n            Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n            {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n            Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n            Harris, Charles R. and Archibald, Anne M. and\n            Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n            {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n            Computing in Python}},\n  journal = {Nature Methods},\n  year    = {2020},\n  volume  = {17},\n  pages   = {261--272},\n  adsurl  = {https://rdcu.be/b08Wh},\n  doi     = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCAmelCase_(datasets.Metric):
    """Spearman rank-order correlation metric, wrapping `scipy.stats.spearmanr`.

    NOTE(review): the obfuscated original defined both hooks under the same
    name `_lowercase` (so the metric-info factory was shadowed) and repeated a
    parameter name in `_compute`, which is a SyntaxError.  `datasets.Metric`
    requires the hooks to be named `_info` and `_compute`; restored accordingly.
    """

    def _info(self):
        """Describe the metric: input feature schema and reference URL."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''float'''),
                    '''references''': datasets.Value('''float'''),
                }
            ),
            reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        """Return the Spearman correlation (and its p-value when requested)."""
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        return {"spearmanr": results[0]}
| 351
|
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return the canonical signature of *word*: its letters in sorted order.

    All anagrams of a word share the same signature.
    NOTE(review): renamed from the obfuscated `a` — the module body below
    calls `signature(word)` explicitly.
    """
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every dictionary word sharing *my_word*'s signature.

    NOTE(review): renamed from the obfuscated `a` — the __main__ block calls
    `anagram(word)`; relies on the module-level `word_by_signature` index.
    """
    return word_by_signature[signature(my_word)]
# Read the bundled dictionary (one word per line) and group words by their
# sorted-letter signature; anagrams land in the same bucket.
# NOTE(review): names restored from the obfuscated original — later lines read
# `data`, `word_list`, `word_by_signature` and `all_anagrams`, which were
# otherwise undefined because every assignment used one throwaway name.
data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    # Emit, as a Python literal, every word that has at least one other anagram.
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
| 315
| 0
|
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
__UpperCAmelCase : List[str] = logging.get_logger(__name__)
@add_end_docstrings(
    _a, r"\n        top_k (`int`, defaults to 5):\n            The number of predictions to return.\n        targets (`str` or `List[str]`, *optional*):\n            When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n            token will be used (with a warning, and that might be slower).\n\n    ", )
class UpperCAmelCase_ ( _a):
    """Masked-language-modelling ("fill-mask") pipeline.

    Given text containing the tokenizer's mask token, scores vocabulary
    candidates for each masked position, optionally restricted to `targets`.

    NOTE(review): this block is machine-obfuscated and not runnable as-is:
    - every method except `__call__` is named `_lowercase`, so each definition
      shadows the previous one, while the bodies call the intended names
      (`self.get_masked_index`, `self._ensure_exactly_one_mask_token`,
      `self.ensure_exactly_one_mask_token`, `self.get_target_ids`);
    - several signatures repeat the parameter name `__SCREAMING_SNAKE_CASE`,
      which is a SyntaxError in Python;
    - results are assigned to the throwaway name `UpperCamelCase` but read
      back under their original names (`masked_index`, `model_inputs`, ...).
    Comments below document the intended behaviour; code left byte-identical.
    """
    def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
        """Return the index/indices of the mask token in `input_ids` (TF or PT)."""
        if self.framework == "tf":
            UpperCamelCase : Tuple = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
        elif self.framework == "pt":
            UpperCamelCase : Dict = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__SCREAMING_SNAKE_CASE )
        else:
            raise ValueError('''Unsupported framework''' )
        return masked_index
    def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
        """Raise a PipelineException when `input_ids` contains no mask token."""
        UpperCamelCase : int = self.get_masked_index(__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Optional[int] = np.prod(masked_index.shape )
        if numel < 1:
            raise PipelineException(
                '''fill-mask''' , self.model.base_model_prefix , f"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
    def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
        """Validate every sequence in a (possibly batched) set of model inputs."""
        if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(__SCREAMING_SNAKE_CASE )
    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ):
        """Tokenize the input text and check that it contains a mask token."""
        if return_tensors is None:
            UpperCamelCase : Any = self.framework
        UpperCamelCase : Optional[Any] = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE )
        self.ensure_exactly_one_mask_token(__SCREAMING_SNAKE_CASE )
        return model_inputs
    def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
        """Forward pass; keeps the input ids alongside the model outputs."""
        UpperCamelCase : List[str] = self.model(**__SCREAMING_SNAKE_CASE )
        UpperCamelCase : str = model_inputs['''input_ids''']
        return model_outputs
    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=None ):
        """Convert logits into the top-k candidate fills for each mask position."""
        # Cannot return more candidates than allowed targets.
        if target_ids is not None and target_ids.shape[0] < top_k:
            UpperCamelCase : str = target_ids.shape[0]
        UpperCamelCase : List[Any] = model_outputs['''input_ids'''][0]
        UpperCamelCase : Optional[Any] = model_outputs['''logits''']
        if self.framework == "tf":
            # TF branch: softmax over the vocab at each mask position.
            UpperCamelCase : Optional[Any] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
            UpperCamelCase : int = outputs.numpy()
            UpperCamelCase : Dict = outputs[0, masked_index, :]
            UpperCamelCase : List[Any] = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 )
            if target_ids is not None:
                # Restrict scores to the requested target ids.
                UpperCamelCase : int = tf.gather_nd(tf.squeeze(__SCREAMING_SNAKE_CASE , 0 ) , target_ids.reshape(-1 , 1 ) )
                UpperCamelCase : str = tf.expand_dims(__SCREAMING_SNAKE_CASE , 0 )
            UpperCamelCase : Tuple = tf.math.top_k(__SCREAMING_SNAKE_CASE , k=__SCREAMING_SNAKE_CASE )
            UpperCamelCase : Dict = topk.values.numpy(), topk.indices.numpy()
        else:
            # PyTorch branch.
            UpperCamelCase : List[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__SCREAMING_SNAKE_CASE ).squeeze(-1 )
            # Fill mask pipeline supports only one ${mask_token} per sample
            UpperCamelCase : Optional[Any] = outputs[0, masked_index, :]
            UpperCamelCase : Optional[int] = logits.softmax(dim=-1 )
            if target_ids is not None:
                UpperCamelCase : Union[str, Any] = probs[..., target_ids]
            UpperCamelCase : int = probs.topk(__SCREAMING_SNAKE_CASE )
        UpperCamelCase : List[str] = []
        # A single mask position lets us unwrap the outer list at the end.
        UpperCamelCase : Dict = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
            UpperCamelCase : List[Any] = []
            for v, p in zip(_values , _predictions ):
                # Copy is important since we're going to modify this array in place
                UpperCamelCase : str = input_ids.numpy().copy()
                if target_ids is not None:
                    UpperCamelCase : Dict = target_ids[p].tolist()
                UpperCamelCase : Any = p
                # Filter padding out:
                UpperCamelCase : Union[str, Any] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                UpperCamelCase : Dict = self.tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
                UpperCamelCase : List[Any] = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
                row.append(__SCREAMING_SNAKE_CASE )
            result.append(__SCREAMING_SNAKE_CASE )
        if single_mask:
            return result[0]
        return result
    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
        """Map requested target words to vocabulary ids, tokenizing unknown ones."""
        if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
            UpperCamelCase : Union[str, Any] = [targets]
        try:
            UpperCamelCase : int = self.tokenizer.get_vocab()
        except Exception:
            UpperCamelCase : int = {}
        UpperCamelCase : Optional[int] = []
        for target in targets:
            UpperCamelCase : int = vocab.get(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            if id_ is None:
                # Target not in the vocab: tokenize it and use the first token.
                UpperCamelCase : Optional[Any] = self.tokenizer(
                    __SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , max_length=1 , truncation=__SCREAMING_SNAKE_CASE , )['''input_ids''']
                if len(__SCREAMING_SNAKE_CASE ) == 0:
                    logger.warning(
                        f"""The specified target token `{target}` does not exist in the model vocabulary. """
                        '''We cannot replace it with anything meaningful, ignoring it''' )
                    continue
                UpperCamelCase : Optional[int] = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"""The specified target token `{target}` does not exist in the model vocabulary. """
                    f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
            target_ids.append(id_ )
        # De-duplicate while keeping an array for fast indexing later.
        UpperCamelCase : Optional[int] = list(set(__SCREAMING_SNAKE_CASE ) )
        if len(__SCREAMING_SNAKE_CASE ) == 0:
            raise ValueError('''At least one target must be provided when passed.''' )
        UpperCamelCase : List[Any] = np.array(__SCREAMING_SNAKE_CASE )
        return target_ids
    def _lowercase ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ):
        """Split pipeline kwargs (`targets`, `top_k`) into stage-parameter dicts."""
        UpperCamelCase : Tuple = {}
        if targets is not None:
            UpperCamelCase : List[Any] = self.get_target_ids(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            UpperCamelCase : Dict = target_ids
        if top_k is not None:
            UpperCamelCase : List[Any] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                '''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
        return {}, {}, postprocess_params
    def __call__( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
        """Run the pipeline; unwrap the singleton list for single-string inputs."""
        UpperCamelCase : List[Any] = super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
        if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) == 1:
            return outputs[0]
        return outputs
| 352
|
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose row-wise records into per-attribute columns of floats.

    >>> get_data([[20, 60, 2012], [23, 90, 2015]])
    [[20.0, 23.0], [60.0, 90.0], [2012.0, 2015.0]]
    """
    # NOTE(review): helper names restored from the obfuscated original — the
    # wrapper `a` below calls `get_data`, `calculate_each_score` and
    # `generate_final_scores` by exactly these names.
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Normalise every attribute column into [0, 1] scores.

    weight 0: lower raw values score higher (cost attribute);
    weight 1: higher raw values score higher (benefit attribute).

    Raises:
        ValueError: for any weight other than 0 or 1.
    """
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    # All values equal: every record gets the best score.
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"""Invalid weight of {weight:f} provided"""
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-attribute scores for every record."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def a(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Append a procentual-proximity score to every record of *source_data*
    (mutates and returns it).

    `weights[i]` selects the scoring direction for attribute `i`
    (0 = lower is better, 1 = higher is better).

    >>> a([[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]], [0, 0, 1])
    [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]]
    """
    # NOTE(review): the obfuscated signature repeated one parameter name (a
    # SyntaxError); parameter names restored from how the body uses them.
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
| 315
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
__UpperCAmelCase : List[str] = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we fine-tune from.

    NOTE(review): the obfuscated original bound every field to one repeated
    class-attribute name and used the undefined sentinel `_a` as a default.
    Class and field names restored from the `HfArgumentParser((ModelArguments,
    DataTrainingArguments, ...))` call and the `model_args.<name>` accesses in
    `main`; boolean defaults are the conventional `False` — TODO confirm.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to the data used for seq2seq training and eval.

    NOTE(review): the obfuscated original bound every field to one repeated
    class-attribute name and used the undefined sentinel `_a` for several
    defaults.  Class name restored from the `HfArgumentParser` call in `main`;
    field names and `None`/`True` defaults reconstructed from the `help`
    texts — TODO confirm against the project's original argument parser.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
    task: Optional[str] = field(
        default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"}, )
    max_source_length: Optional[int] = field(
        default=1024, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    max_target_length: Optional[int] = field(
        default=128, metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    val_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        }, )
    test_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."}, )
def a(split, metrics, output_dir):
    """Log *metrics* for *split* and save them to `<output_dir>/<split>_results.json`.

    NOTE(review): the obfuscated original declared three parameters under one
    duplicated name (a SyntaxError); parameter names restored from how the
    body uses them.  Relies on the module-level `logger` and `save_json`.
    """
    logger.info(f"""***** {split} metrics *****""")
    for key in sorted(metrics.keys()):
        logger.info(f"""  {key} = {metrics[key]}""")
    save_json(metrics, os.path.join(output_dir, f"""{split}_results.json"""))
def a ( ):
    """Fine-tune / evaluate a seq2seq model (summarization or translation).

    Parses ``ModelArguments`` / ``DataTrainingArguments`` /
    ``SeqaSeqTrainingArguments`` from the CLI (or from a single JSON file),
    builds config + tokenizer + model + datasets, then runs train / eval /
    predict as requested and returns the collected metrics dict.

    NOTE(review): an automated rename collapsed many distinct local bindings
    into ``UpperCamelCase`` and call arguments into ``SCREAMING_SNAKE_CASE_``.
    Names such as ``parser``, ``model_args``/``data_args``/``training_args``,
    ``config``, ``tokenizer``, ``model``, ``dataset_class``, ``trainer``,
    ``all_metrics`` are *read* below but never bound under those names, so as
    written this function raises NameError at runtime — restore the original
    assignments before use.
    """
    UpperCamelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        UpperCamelCase : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        UpperCamelCase : List[Any] = parser.parse_args_into_dataclasses()
    check_output_dir(SCREAMING_SNAKE_CASE_ )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s -   %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
    logger.info('''Training/evaluation parameters %s''' , SCREAMING_SNAKE_CASE_ )
    # Set seed
    set_seed(training_args.seed )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    UpperCamelCase : int = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    # Dropout / layerdrop CLI overrides are copied onto the config only when
    # explicitly provided (truthy on the args object).
    UpperCamelCase : Dict = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
    for p in extra_model_params:
        if getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
            assert hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
            setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
    UpperCamelCase : str = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    UpperCamelCase : int = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , )
    # use task specific params
    use_task_specific_params(SCREAMING_SNAKE_CASE_ , data_args.task )
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        UpperCamelCase : Optional[int] = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(SCREAMING_SNAKE_CASE_ , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
            UpperCamelCase : Tuple = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            UpperCamelCase : str = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
    if model_args.freeze_embeds:
        freeze_embeds(SCREAMING_SNAKE_CASE_ )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )
    UpperCamelCase : List[Any] = SeqaSeqDataset
    # Get datasets
    UpperCamelCase : Union[str, Any] = (
        dataset_class(
            SCREAMING_SNAKE_CASE_ , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
        if training_args.do_train
        else None
    )
    UpperCamelCase : Any = (
        dataset_class(
            SCREAMING_SNAKE_CASE_ , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    UpperCamelCase : Optional[int] = (
        dataset_class(
            SCREAMING_SNAKE_CASE_ , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    UpperCamelCase : List[Any] = (
        build_compute_metrics_fn(data_args.task , SCREAMING_SNAKE_CASE_ ) if training_args.predict_with_generate else None
    )
    UpperCamelCase : Any = SeqaSeqTrainer(
        model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , data_args=SCREAMING_SNAKE_CASE_ , train_dataset=SCREAMING_SNAKE_CASE_ , eval_dataset=SCREAMING_SNAKE_CASE_ , data_collator=SeqaSeqDataCollator(
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , )
    UpperCamelCase : Tuple = {}
    # Training
    if training_args.do_train:
        logger.info('''*** Train ***''' )
        UpperCamelCase : str = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        UpperCamelCase : int = train_result.metrics
        UpperCamelCase : Tuple = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics('''train''' , SCREAMING_SNAKE_CASE_ , training_args.output_dir )
            all_metrics.update(SCREAMING_SNAKE_CASE_ )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        UpperCamelCase : str = trainer.evaluate(metric_key_prefix='''val''' )
        UpperCamelCase : Dict = data_args.n_val
        UpperCamelCase : Optional[Any] = round(metrics['''val_loss'''] , 4 )
        if trainer.is_world_process_zero():
            handle_metrics('''val''' , SCREAMING_SNAKE_CASE_ , training_args.output_dir )
            all_metrics.update(SCREAMING_SNAKE_CASE_ )
    if training_args.do_predict:
        logger.info('''*** Predict ***''' )
        UpperCamelCase : str = trainer.predict(test_dataset=SCREAMING_SNAKE_CASE_ , metric_key_prefix='''test''' )
        UpperCamelCase : Tuple = test_output.metrics
        UpperCamelCase : Tuple = data_args.n_test
        if trainer.is_world_process_zero():
            UpperCamelCase : Optional[int] = round(metrics['''test_loss'''] , 4 )
            handle_metrics('''test''' , SCREAMING_SNAKE_CASE_ , training_args.output_dir )
            all_metrics.update(SCREAMING_SNAKE_CASE_ )
            if training_args.predict_with_generate:
                UpperCamelCase : Tuple = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
                UpperCamelCase : Union[str, Any] = lmap(str.strip , SCREAMING_SNAKE_CASE_ )
                write_txt_file(SCREAMING_SNAKE_CASE_ , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
    if trainer.is_world_process_zero():
        save_json(SCREAMING_SNAKE_CASE_ , os.path.join(training_args.output_dir , '''all_results.json''' ) )
    return all_metrics
def a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
    """TPU entry point used by ``xla_spawn`` / multiprocess launchers.

    The positional argument is the process index supplied by the spawner and
    is intentionally unused — every spawned process simply runs ``main()``.
    """
    main()
if __name__ == "__main__":
    # Script entry point: run the full train/eval/predict pipeline.
    main()
| 353
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
# NOTE(review): these four module constants were originally distinct — the
# label, image and output directories used by the functions below, plus the
# flip type — but an automated rename collapsed them onto one name, so only
# the final assignment survives and names like ``OUTPUT_DIR`` referenced
# later in this file are unbound.  Restore the original distinct names.
__UpperCAmelCase : Optional[int] = ""
__UpperCAmelCase : Union[str, Any] = ""
__UpperCAmelCase : Optional[int] = ""
__UpperCAmelCase : Any = 1  # (0 is vertical, 1 is horizontal)
def a():
    """Flip every dataset image (with its YOLO annotations) and write the
    results to ``OUTPUT_DIR`` under randomized, collision-free file names.

    NOTE(review): the module-level directory constants above were collapsed
    onto a single name by an automated rename; ``LABEL_DIR``, ``IMG_DIR``,
    ``OUTPUT_DIR`` and ``FLIP_TYPE`` are the names this function already
    references or implies — confirm against the original script and restore
    the constants before running.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print('''Processing...''' )
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)
    for index, image in enumerate(new_images):
        # Random 32-char suffix keeps output names unique, e.g. '7b7a...f3bad'.
        letter_code = random_chars(3_2)
        file_name = paths[index].split(os.sep)[-1].rsplit('''.''' , 1)[0]
        file_root = F"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
        # Fix: the original prefixed these paths with '/', rooting the output
        # at the filesystem root instead of inside OUTPUT_DIR.
        cva.imwrite(F"""{file_root}.jpg""" , image , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
        print(F"""Success {index + 1}/{len(new_images)} with {file_name}""" )
        annos_list = []
        for anno in new_annos[index]:
            annos_list.append(F"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}""")
        with open(F"""{file_root}.txt""" , '''w''' ) as outfile:
            outfile.write('''\n'''.join(line for line in annos_list))
def a(label_dir: str, img_dir: str):
    """Collect YOLO-format annotations and matching image paths.

    Args:
        label_dir: Directory containing ``*.txt`` label files, one per image,
            each line formatted ``<class> <x> <y> <w> <h>``.
        img_dir: Directory containing the matching ``.jpg`` images named like
            the label files.

    Returns:
        Tuple ``(img_paths, labels)`` where ``labels[i]`` is the list of
        ``[class, x, y, w, h]`` boxes for ``img_paths[i]``.  Images whose
        label file is empty are skipped entirely.
    """
    # Fix: the original declared both parameters under the same placeholder
    # name (a SyntaxError) and dropped the local bindings; names here are
    # reconstructed from the references that survive in the body.
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , '''*.txt''' ) ):
        label_name = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , F"""{label_name}.jpg""" )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('''\n''' ).split(''' ''' )
            boxes.append(
                [
                    int(obj[0] ),
                    float(obj[1] ),
                    float(obj[2] ),
                    float(obj[3] ),
                    float(obj[4] ),
                ] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
def a(img_list: list, anno_list: list, flip_type: int = 1):
    """Flip each image and its YOLO boxes horizontally or vertically.

    Args:
        img_list: Paths of the input images.
        anno_list: Per-image list of ``[class, x, y, w, h]`` boxes in
            relative coordinates.
        flip_type: OpenCV flip code — 1 mirrors horizontally (x centers
            flipped), 0 mirrors vertically (y centers flipped).

    Returns:
        Tuple ``(new_imgs_list, new_annos_lists, path_list)`` of flipped
        images, their adjusted annotations and the source image paths.
    """
    # Fix: the original declared all three parameters under one placeholder
    # name (a SyntaxError) and dropped every local binding; names are
    # reconstructed from the references that survive in the body.
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list ) ):
        new_annos = []
        path = img_list[idx]
        path_list.append(path )
        img_annos = anno_list[idx]
        img = cva.imread(path )
        if flip_type == 1:
            new_img = cva.flip(img , flip_type )
            for bbox in img_annos:
                # Horizontal flip mirrors the relative x center.
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
        elif flip_type == 0:
            new_img = cva.flip(img , flip_type )
            for bbox in img_annos:
                # Vertical flip mirrors the relative y center.
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
        new_annos_lists.append(new_annos )
        new_imgs_list.append(new_img )
    return new_imgs_list, new_annos_lists, path_list
def a(number_char: int = 3_2) -> str:
    """Return a random string of lowercase letters and digits.

    Args:
        number_char: Desired length; must be greater than 1.

    Raises:
        ValueError: if ``number_char`` is not greater than 1.  (The original
            used ``assert``, which is silently stripped under ``python -O``.)
    """
    if number_char <= 1:
        raise ValueError("The number of character should greater than 1")
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
    # NOTE(review): ``main`` is not bound under that name in this file (the
    # entry function above was renamed ``a``) — confirm the intended target.
    main()
    print("DONE ✅")
| 315
| 0
|
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
# NOTE(review): this flag is read later in the file as
# ``is_python_no_less_than_3_10`` but is bound here under a different name —
# restore the original binding so the guards below resolve.
__UpperCAmelCase : Optional[Any] = sys.version_info >= (3, 10)
def a(default=None, metadata=None):
    """Shorthand for a dataclass list field with a (possibly mutable) default.

    Wraps ``dataclasses.field`` so ``default`` is produced by a fresh
    ``default_factory`` call per instance instead of being shared.

    NOTE(review): the original declared both parameters under the same
    placeholder name (a SyntaxError); the lambda's reference to ``default``
    pins down the first name, leaving ``metadata`` for the second.
    """
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class UpperCAmelCase_ :
    """Parser fixture with one field of each basic scalar type.

    NOTE(review): an automated rename collapsed the four distinct field names
    (``foo``/``bar``/``baz``/``flag``, judging by the parser tests later in
    this file) onto one identifier, so only the last annotation survives —
    restore the original names.
    """
    __UpperCamelCase : int
    __UpperCamelCase : float
    __UpperCamelCase : str
    __UpperCamelCase : bool
@dataclass
class UpperCAmelCase_ :
    """Parser fixture with defaulted fields: an int default and a string
    field carrying ``help`` metadata for the generated CLI option.
    """
    # NOTE(review): field names were collapsed by an automated rename.
    __UpperCamelCase : int = 42
    __UpperCamelCase : str = field(default="toto", metadata={"help": "help message"})
@dataclass
class UpperCAmelCase_ :
    """Parser fixture for boolean handling: a False default, a True default
    (which the parser pairs with a generated ``--no_*`` flag) and an
    optional boolean defaulting to None.
    """
    __UpperCamelCase : bool = False
    __UpperCamelCase : bool = True
    __UpperCamelCase : Optional[bool] = None
class UpperCAmelCase_ ( _a):
    """String-valued enum fixture with members ``titi`` and ``toto``.

    NOTE(review): the base class ``_a`` is not defined in this file; the
    fixture presumably derived from ``Enum`` originally — confirm.
    """
    __UpperCamelCase : Union[str, Any] = "titi"
    __UpperCamelCase : Optional[Any] = "toto"
class UpperCAmelCase_ ( _a):
    """Mixed-type enum fixture: two string members plus an int member (42),
    used to exercise choice parsing with heterogeneous value types.

    NOTE(review): the base class ``_a`` is not defined in this file; the
    fixture presumably derived from ``Enum`` originally — confirm.
    """
    __UpperCamelCase : Optional[int] = "titi"
    __UpperCamelCase : Union[str, Any] = "toto"
    __UpperCamelCase : Optional[Any] = 42
@dataclass
class UpperCAmelCase_ :
    """Fixture whose field is annotated as ``BasicEnum`` but defaulted with a
    plain string; the post-init hook coerces it into the enum.
    """
    __UpperCamelCase : BasicEnum = "toto"
    def _lowercase ( self ):
        """Coerce the raw string value into the enum type after init."""
        # NOTE(review): originally ``__post_init__`` — under this name the
        # dataclass machinery never invokes it, and the result is discarded.
        UpperCamelCase = BasicEnum(self.foo )
@dataclass
class UpperCAmelCase_ :
    """Fixture whose field is annotated as ``MixedTypeEnum`` but defaulted
    with a plain string; the post-init hook coerces it into the enum.
    """
    __UpperCamelCase : MixedTypeEnum = "toto"
    def _lowercase ( self ):
        """Coerce the raw value into the mixed-type enum after init."""
        # NOTE(review): originally ``__post_init__`` — under this name the
        # dataclass machinery never invokes it, and the result is discarded.
        UpperCamelCase = MixedTypeEnum(self.foo )
@dataclass
class UpperCAmelCase_ :
    """Parser fixture in which every field is optional: scalars default to
    None and list fields default to empty lists, so no CLI flag is required.
    """
    # NOTE(review): ``_a`` (the default below) and ``list_field`` are not
    # bound under those names in this file — restore the original bindings.
    __UpperCamelCase : Optional[int] = None
    __UpperCamelCase : Optional[float] = field(default=_a, metadata={"help": "help message"})
    __UpperCamelCase : Optional[str] = None
    __UpperCamelCase : Optional[List[str]] = list_field(default=[])
    __UpperCamelCase : Optional[List[int]] = list_field(default=[])
@dataclass
class UpperCAmelCase_ :
    """Parser fixture covering list fields of each element type (int, str,
    float), with empty and non-empty defaults.
    """
    __UpperCamelCase : List[int] = list_field(default=[])
    __UpperCamelCase : List[int] = list_field(default=[1, 2, 3])
    __UpperCamelCase : List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    __UpperCamelCase : List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class UpperCAmelCase_ :
    """Parser fixture in which every field is declared via ``field()`` with
    no default, so each generated CLI argument is required.
    """
    __UpperCamelCase : List[int] = field()
    __UpperCamelCase : str = field()
    __UpperCamelCase : BasicEnum = field()
    def _lowercase ( self ):
        """Coerce the raw required-enum value into the enum after init."""
        # NOTE(review): originally ``__post_init__`` — never invoked under
        # this name, and the result is discarded.
        UpperCamelCase = BasicEnum(self.required_enum )
@dataclass
class UpperCAmelCase_ :
    """Parser fixture whose annotations are string literals (forward
    references), checking that the parser resolves them like real types.
    """
    __UpperCamelCase : int
    __UpperCamelCase : "BasicEnum" = field()
    __UpperCamelCase : "Optional[bool]" = None
    __UpperCamelCase : "str" = field(default="toto", metadata={"help": "help message"})
    __UpperCamelCase : "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])
if is_python_no_less_than_3_10:
    # PEP 604 (``X | None``) and PEP 585 (``list[str]``) fixture variants;
    # only defined on Python >= 3.10 where that syntax parses.
    # NOTE(review): the guard flag above was bound under a different name, so
    # ``is_python_no_less_than_3_10`` is unbound here — restore the binding.
    @dataclass
    class UpperCAmelCase_ :
        """PEP 604 variant of the default-bool fixture."""
        __UpperCamelCase : bool = False
        __UpperCamelCase : bool = True
        __UpperCamelCase : bool | None = None
    @dataclass
    class UpperCAmelCase_ :
        """PEP 604 / PEP 585 variant of the all-optional fixture."""
        __UpperCamelCase : int | None = None
        __UpperCamelCase : float | None = field(default=_a, metadata={"help": "help message"})
        __UpperCamelCase : str | None = None
        __UpperCamelCase : list[str] | None = list_field(default=[])
        __UpperCamelCase : list[int] | None = list_field(default=[])
class UpperCAmelCase_ ( unittest.TestCase):
    """Unit tests for ``HfArgumentParser``: each fixture dataclass' generated
    parser is compared against a hand-built ``argparse`` parser, and the
    dict / JSON / YAML parsing entry points are exercised.

    NOTE(review): an automated rename damaged this class — several ``def``s
    declare two parameters with the same placeholder name (a SyntaxError),
    most local bindings were collapsed onto ``UpperCamelCase`` while later
    lines still read the original names (``parser``, ``expected``, ``args``,
    ``xx``/``yy``, ``example``, ...), and every test method shares the name
    ``_lowercase`` (so only the last one survives and none has a ``test_``
    prefix unittest would discover).  The bodies are preserved unchanged;
    restore the original identifiers before running.
    """
    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
        """Assert that two ``argparse`` parsers declare equivalent actions."""
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            UpperCamelCase = {k: v for k, v in vars(__SCREAMING_SNAKE_CASE ).items() if k != '''container'''}
            UpperCamelCase = {k: v for k, v in vars(__SCREAMING_SNAKE_CASE ).items() if k != '''container'''}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get('''choices''' , __SCREAMING_SNAKE_CASE ) and yy.get('''choices''' , __SCREAMING_SNAKE_CASE ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx['''type'''](__SCREAMING_SNAKE_CASE ) , yy['''type'''](__SCREAMING_SNAKE_CASE ) )
                del xx["type"], yy["type"]
            self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    def _lowercase ( self ):
        """Basic fixture: four scalar fields map to four CLI arguments."""
        UpperCamelCase = HfArgumentParser(__SCREAMING_SNAKE_CASE )
        UpperCamelCase = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE )
        expected.add_argument('''--bar''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE )
        expected.add_argument('''--baz''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE )
        expected.add_argument('''--flag''' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , const=__SCREAMING_SNAKE_CASE , nargs='''?''' )
        self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        UpperCamelCase = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
        # NOTE(review): originally ``(example,) = ...`` — as written,
        # ``example`` on the next line is unbound.
        (UpperCamelCase ) = parser.parse_args_into_dataclasses(__SCREAMING_SNAKE_CASE , look_for_args_file=__SCREAMING_SNAKE_CASE )
        self.assertFalse(example.flag )
    def _lowercase ( self ):
        """Defaults fixture: defaulted fields become optional arguments."""
        UpperCamelCase = HfArgumentParser(__SCREAMING_SNAKE_CASE )
        UpperCamelCase = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , default=42 , type=__SCREAMING_SNAKE_CASE )
        expected.add_argument('''--baz''' , default='''toto''' , type=__SCREAMING_SNAKE_CASE , help='''help message''' )
        self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    def _lowercase ( self ):
        """Bool fixture: a True default gets a paired ``--no_*`` flag."""
        UpperCamelCase = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , const=__SCREAMING_SNAKE_CASE , nargs='''?''' )
        expected.add_argument('''--baz''' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , const=__SCREAMING_SNAKE_CASE , nargs='''?''' )
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument('''--no_baz''' , action='''store_false''' , default=__SCREAMING_SNAKE_CASE , dest='''baz''' )
        expected.add_argument('''--opt''' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE )
        UpperCamelCase = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(__SCREAMING_SNAKE_CASE )
        for dataclass_type in dataclass_types:
            UpperCamelCase = HfArgumentParser(__SCREAMING_SNAKE_CASE )
            self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            UpperCamelCase = parser.parse_args([] )
            self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , opt=__SCREAMING_SNAKE_CASE ) )
            UpperCamelCase = parser.parse_args(['''--foo''', '''--no_baz'''] )
            self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , opt=__SCREAMING_SNAKE_CASE ) )
            UpperCamelCase = parser.parse_args(['''--foo''', '''--baz'''] )
            self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , opt=__SCREAMING_SNAKE_CASE ) )
            UpperCamelCase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
            self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , opt=__SCREAMING_SNAKE_CASE ) )
            UpperCamelCase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
            self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , opt=__SCREAMING_SNAKE_CASE ) )
    def _lowercase ( self ):
        """Enum fixture: choices are exposed and parsed back to enum members."""
        UpperCamelCase = HfArgumentParser(__SCREAMING_SNAKE_CASE )
        UpperCamelCase = argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
        self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        UpperCamelCase = parser.parse_args([] )
        self.assertEqual(args.foo , '''toto''' )
        UpperCamelCase = parser.parse_args_into_dataclasses([] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
        UpperCamelCase = parser.parse_args(['''--foo''', '''titi'''] )
        self.assertEqual(args.foo , '''titi''' )
        UpperCamelCase = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
        UpperCamelCase = parser.parse_args(['''--foo''', '''42'''] )
        self.assertEqual(args.foo , 42 )
        UpperCamelCase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
    def _lowercase ( self ):
        """Literal fixture: a ``Literal`` annotation behaves like choices."""
        @dataclass
        class UpperCAmelCase_ :
            """Local fixture with a ``Literal``-typed defaulted field."""
            __UpperCamelCase : Literal["titi", "toto", 42] = "toto"
        UpperCamelCase = HfArgumentParser(__SCREAMING_SNAKE_CASE )
        UpperCamelCase = argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
        self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        UpperCamelCase = parser.parse_args([] )
        self.assertEqual(args.foo , '''toto''' )
        UpperCamelCase = parser.parse_args(['''--foo''', '''titi'''] )
        self.assertEqual(args.foo , '''titi''' )
        UpperCamelCase = parser.parse_args(['''--foo''', '''42'''] )
        self.assertEqual(args.foo , 42 )
    def _lowercase ( self ):
        """List fixture: list fields become ``nargs='+'`` arguments."""
        UpperCamelCase = HfArgumentParser(__SCREAMING_SNAKE_CASE )
        UpperCamelCase = argparse.ArgumentParser()
        expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=__SCREAMING_SNAKE_CASE )
        expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=__SCREAMING_SNAKE_CASE )
        expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__SCREAMING_SNAKE_CASE )
        expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=__SCREAMING_SNAKE_CASE )
        self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        UpperCamelCase = parser.parse_args([] )
        self.assertEqual(
            __SCREAMING_SNAKE_CASE , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
        UpperCamelCase = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
        self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
    def _lowercase ( self ):
        """Optional fixture: every argument defaults to None / empty list."""
        UpperCamelCase = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE )
        expected.add_argument('''--bar''' , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help='''help message''' )
        expected.add_argument('''--baz''' , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE )
        expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=__SCREAMING_SNAKE_CASE )
        expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=__SCREAMING_SNAKE_CASE )
        UpperCamelCase = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(__SCREAMING_SNAKE_CASE )
        for dataclass_type in dataclass_types:
            UpperCamelCase = HfArgumentParser(__SCREAMING_SNAKE_CASE )
            self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            UpperCamelCase = parser.parse_args([] )
            self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , bar=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , ces=[] , des=[] ) )
            UpperCamelCase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
            self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
    def _lowercase ( self ):
        """Required fixture: fields without defaults become required args."""
        UpperCamelCase = HfArgumentParser(__SCREAMING_SNAKE_CASE )
        UpperCamelCase = argparse.ArgumentParser()
        expected.add_argument('''--required_list''' , nargs='''+''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE )
        expected.add_argument('''--required_str''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE )
        expected.add_argument(
            '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__SCREAMING_SNAKE_CASE , )
        self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    def _lowercase ( self ):
        """String-annotation fixture: forward-reference annotations resolve."""
        UpperCamelCase = HfArgumentParser(__SCREAMING_SNAKE_CASE )
        UpperCamelCase = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE )
        expected.add_argument(
            '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__SCREAMING_SNAKE_CASE , )
        expected.add_argument('''--opt''' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE )
        expected.add_argument('''--baz''' , default='''toto''' , type=__SCREAMING_SNAKE_CASE , help='''help message''' )
        expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__SCREAMING_SNAKE_CASE )
        self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    def _lowercase ( self ):
        """parse_dict: a plain dict round-trips into the dataclass."""
        UpperCamelCase = HfArgumentParser(__SCREAMING_SNAKE_CASE )
        UpperCamelCase = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        UpperCamelCase = parser.parse_dict(__SCREAMING_SNAKE_CASE )[0]
        UpperCamelCase = BasicExample(**__SCREAMING_SNAKE_CASE )
        self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    def _lowercase ( self ):
        """parse_dict: unknown keys raise when allow_extra_keys is disabled."""
        UpperCamelCase = HfArgumentParser(__SCREAMING_SNAKE_CASE )
        UpperCamelCase = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
            '''extra''': 42,
        }
        self.assertRaises(__SCREAMING_SNAKE_CASE , parser.parse_dict , __SCREAMING_SNAKE_CASE , allow_extra_keys=__SCREAMING_SNAKE_CASE )
    def _lowercase ( self ):
        """JSON-file parsing round-trips into the dataclass."""
        UpperCamelCase = HfArgumentParser(__SCREAMING_SNAKE_CASE )
        UpperCamelCase = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            UpperCamelCase = os.path.join(__SCREAMING_SNAKE_CASE , '''temp_json''' )
            os.mkdir(__SCREAMING_SNAKE_CASE )
            with open(temp_local_path + '''.json''' , '''w+''' ) as f:
                json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            # NOTE(review): this JSON file is read with ``parse_yaml_file``;
            # the upstream test uses ``parse_json_file`` here — it presumably
            # only works because this payload is also valid YAML.  Confirm
            # the intent and switch to ``parse_json_file``.
            UpperCamelCase = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0]
            UpperCamelCase = BasicExample(**__SCREAMING_SNAKE_CASE )
            self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    def _lowercase ( self ):
        """YAML-file parsing round-trips into the dataclass."""
        UpperCamelCase = HfArgumentParser(__SCREAMING_SNAKE_CASE )
        UpperCamelCase = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            UpperCamelCase = os.path.join(__SCREAMING_SNAKE_CASE , '''temp_yaml''' )
            os.mkdir(__SCREAMING_SNAKE_CASE )
            with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
                yaml.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            UpperCamelCase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
            UpperCamelCase = BasicExample(**__SCREAMING_SNAKE_CASE )
            self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    def _lowercase ( self ):
        """Smoke test: ``TrainingArguments`` can be wrapped without error."""
        UpperCamelCase = HfArgumentParser(__SCREAMING_SNAKE_CASE )
        self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
| 354
|
import qiskit
def a(bita: int, bitb: int):
    """Build and simulate a quantum half adder for two classical input bits.

    Args:
        bita: First input bit (0 or 1), encoded on qubit 0.
        bitb: Second input bit (0 or 1), encoded on qubit 1.

    Returns:
        The measurement counts dict from 1000 shots; classical bit 0 carries
        the XOR (sum) and classical bit 1 the AND (carry).
    """
    # Fix: the original declared both parameters under one placeholder name
    # (a SyntaxError) and consequently tested the same bit twice; the two
    # qubit encodings below pin down the intended meaning.
    simulator = qiskit.Aer.get_backend('''aer_simulator''' )
    qc_ha = qiskit.QuantumCircuit(4 , 2 )
    # encode inputs in qubits 0 and 1
    if bita == 1:
        qc_ha.x(0 )
    if bitb == 1:
        qc_ha.x(1 )
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0 , 2 )
    qc_ha.cx(1 , 2 )
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0 , 1 , 3 )
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2 , 0 )  # extract XOR value
    qc_ha.measure(3 , 1 )  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha , simulator , shots=1_0_0_0 )
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha )
if __name__ == "__main__":
    # Demo: add 1 + 1 on the simulated half adder and print the counts.
    # NOTE(review): ``half_adder`` and ``counts`` are not bound under those
    # names in this file (the function above was renamed ``a`` and the result
    # binding was mangled) — restore the original names before running.
    __UpperCAmelCase : int = half_adder(1, 1)
    print(f'''Half Adder Output Qubit Counts: {counts}''')
| 315
| 0
|
import torch
from transformers import AutoModel
class UpperCAmelCase_ ( torch.nn.Module):
    """Few-shot NER span scorer.

    Encodes a query batch and a support batch with a shared BERT encoder and,
    for each query, scores every query token against the support tokens that
    are marked as entity start / end, producing per-token start and end
    probability vectors.

    NOTE(review): the original code here had all four methods renamed to
    ``_lowercase`` (so they shadowed one another), two of them with duplicate
    parameter names (a SyntaxError), never assigned the attributes that
    ``forward`` reads, and called ``super()`` with the model-name string as
    the class argument.  Method and attribute names below are restored from
    the references that survive in the body (``self.bert``, ``self.cos``,
    ``self.softmax``, ``self.BERT``) — confirm against the upstream FSNER
    project.
    """
    def __init__( self , pretrained_model_name_or_path="sayef/fsner-bert-base-uncased" ):
        super().__init__()
        # Shared encoder for both query and support sentences.
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path , return_dict=True )
        # Cosine similarity over the last (hidden) dimension of 4-D inputs.
        self.cos = torch.nn.CosineSimilarity(3 , 1e-08 )
        self.softmax = torch.nn.Softmax(dim=1 )
    def BERT( self , **inputs ):
        """Run the encoder and return the last hidden state."""
        return self.bert(**inputs ).last_hidden_state
    def VectorSum( self , token_embeddings ):
        """Sum token embeddings over axis 2, keeping the dimension."""
        return token_embeddings.sum(2 , keepdim=True )
    def Atten( self , q_rep , S_rep , T=1 ):
        """Temperature-scaled softmax over cosine similarities."""
        return self.softmax(T * self.cos(q_rep , S_rep ) )
    def forward( self , W_query , W_supports ):
        """Score start/end token probabilities of each query vs. its supports.

        Args:
            W_query: tokenizer output (dict of tensors) for the query batch.
            W_supports: tokenizer output for the support batch carrying three
                extra keys — ``sizes`` (supports per query), ``start_token_id``
                and ``end_token_id`` — which are popped before encoding.

        Returns:
            Tuple ``(p_starts, p_ends)`` of stacked per-query probability
            vectors (or the single vectors when there is one query).
        """
        support_sizes = W_supports['''sizes'''].tolist()
        start_token_id = W_supports['''start_token_id'''].item()
        end_token_id = W_supports['''end_token_id'''].item()
        # These bookkeeping keys must not reach the encoder.
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query )
        S = self.BERT(**W_supports )
        p_starts = None
        p_ends = None
        start_token_masks = W_supports['''input_ids'''] == start_token_id
        end_token_masks = W_supports['''input_ids'''] == end_token_id
        for i, size in enumerate(support_sizes ):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            # Support tokens flagged as entity start / end for this query.
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
            p_end = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start) )
                p_ends = torch.vstack((p_ends, p_end) )
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
| 355
|
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__UpperCAmelCase : str = logging.get_logger(__name__)
def a(image_embeds, text_embeds):
    """Pairwise cosine similarity between two embedding batches.

    Both inputs are L2-normalized along their last dimension, so the plain
    matrix product of the normalized batches equals the cosine similarity of
    every (image, text) pair.

    Args:
        image_embeds: float tensor of shape (N, D).
        text_embeds: float tensor of shape (M, D).

    Returns:
        Tensor of shape (N, M) holding the cosine similarities.
    """
    # Fix: the original declared both parameters under the same placeholder
    # name (a SyntaxError) and multiplied the *unnormalized* first argument
    # by an unbound ``normalized_text_embeds`` name.
    normalized_image_embeds = nn.functional.normalize(image_embeds )
    normalized_text_embeds = nn.functional.normalize(text_embeds )
    return torch.mm(normalized_image_embeds , normalized_text_embeds.t() )
class UpperCAmelCase_(_a):
    """CLIP-based safety checker: flags images whose CLIP embedding is close
    to learned NSFW / "special care" concept embeddings.

    NOTE(review): this matches diffusers' ``StableDiffusionSafetyChecker``;
    the mangled class/base names are kept to preserve the visible interface.
    """

    # NOTE(review): the original assigned both class attributes to one
    # mangled name, so the first assignment was silently lost; names restored
    # per the PreTrainedModel conventions.
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config):
        super().__init__(config)
        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)
        # Frozen concept embeddings / thresholds; the ones() are placeholders
        # overwritten by the pretrained checkpoint.
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def _lowercase(self, clip_input, images):
        """Per-image (python loop) variant; returns (images, has_nsfw_concepts).

        NOTE(review): shadowed by the second ``_lowercase`` below — upstream
        these are ``forward`` and ``forward_onnx``; the mangled names are
        kept as-is, confirm which one callers expect.
        """
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()
        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0
            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    # a triggered "special care" concept lowers every NSFW threshold
                    adjustment = 0.01
            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)
            result.append(result_img)
        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
        return images, has_nsfw_concepts

    @torch.no_grad()
    def _lowercase(self, clip_input, images):  # noqa: F811 — vectorized variant; see NOTE above
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])
        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)
        return images, has_nsfw_concepts
| 315
| 0
|
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    """Builds a tiny RobertaPreLayerNorm config plus dummy inputs for the
    flax model tests below.

    NOTE(review): restored from the mangled original — the test class below
    instantiates ``FlaxRobertaPreLayerNormModelTester`` and the methods call
    ``self.prepare_config_and_inputs``, so the definition names are aligned
    with those call sites. The original ``__init__`` repeated one parameter
    name for every argument (a SyntaxError).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Random ids/masks plus a tiny encoder config (is_decoder=False)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        # Re-use the encoder setup but flip the config into decoder mode and
        # add cross-attention inputs.
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class UpperCAmelCase_(_a, unittest.TestCase):
    """Common flax model tests for RobertaPreLayerNorm, driven by the
    FlaxModelTesterMixin base (``_a``)."""

    # NOTE(review): the original assigned both class attributes to the same
    # mangled name (first was lost); restored to the upstream flax-test
    # names — confirm `test_head_masking` against the mixin in this repo.
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        # Original mangled name `_lowercase` would never run as the unittest
        # setUp hook; restored so the tester is actually created.
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            # The from_pt flag was an undefined mangled name; the checkpoint
            # is a PyTorch one, hence from_pt=True upstream.
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class UpperCAmelCase_(unittest.TestCase):
    """Slow integration tests against the pretrained
    andreasmadsen/efficient_mlm_m0.40 checkpoint.

    NOTE(review): both methods were mangled to `_lowercase` (the second
    silently shadowed the first, so only one test ever existed); restored to
    the upstream test names.
    """

    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        # dtype restored from the mangled `jnp.intaa`; upstream uses int32 —
        # NOTE(review): confirm against the reference test file.
        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50_265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]],
            dtype=np.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]],
            dtype=np.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 356
|
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    """Entry point for the ``diffusers-cli`` console script: build the
    argument parser, dispatch to the selected sub-command and run it.

    NOTE(review): the original (mangled) definition was named ``a`` while the
    ``__main__`` guard called an undefined ``main()``, and the locals were
    read under names (`parser`, `args`, `service`) that were never assigned;
    both restored.
    """
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        # No sub-command selected: show usage and exit with an error code.
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
| 315
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase_(_a, unittest.TestCase):
    """Fast (CPU, tiny-model) tests for LDMTextToImagePipeline.

    NOTE(review): the original assigned every class attribute to the same
    mangled name and read locals under names that were never assigned; both
    restored per the PipelineTesterMixin conventions.
    """

    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # NOTE(review): restored as `test_cpu_offload` (= False) — confirm the
    # flag name against the PipelineTesterMixin version in this repo.
    test_cpu_offload = False

    def get_dummy_components(self):
        """Tiny randomly-initialized components, keyed as the pipeline expects."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00_085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64),
            in_channels=3,
            out_channels=3,
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6_101, 0.6_156, 0.5_622, 0.4_895, 0.6_661, 0.3_804, 0.5_748, 0.6_136, 0.5_014])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class UpperCAmelCase_(unittest.TestCase):
    """Slow GPU tests for the pretrained CompVis/ldm-text2im-large-256 pipeline."""

    def tearDown(self):
        # Original mangled name `_lowercase` would never run as the unittest
        # tearDown hook (it even calls super().tearDown()); restored.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        # NOTE(review): duplicate mangled parameter names (a SyntaxError) and
        # the mangled default `torch.floataa` restored; upstream uses float32.
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array(
            [0.51_825, 0.52_850, 0.52_543, 0.54_258, 0.52_304, 0.52_569, 0.54_363, 0.55_276, 0.56_878]
        )
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3
@nightly
@require_torch_gpu
class UpperCAmelCase_(unittest.TestCase):
    """Nightly GPU regression test: full 50-step run compared against a
    stored reference image."""

    def tearDown(self):
        # Restored from the mangled `_lowercase` so the unittest tearDown
        # hook actually fires.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        # NOTE(review): duplicate mangled parameter names and the mangled
        # default `torch.floataa` restored; upstream uses float32.
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
| 357
|
def a(word: str) -> str:
    """Uppercase the ASCII lowercase letters of *word*, leaving every other
    character untouched.

    >>> a("Hello World!")
    'HELLO WORLD!'
    >>> a("")
    ''
    """
    # ord(char) - 32 maps 'a'..'z' onto 'A'..'Z' in ASCII.  The original
    # mangled body applied ord() to the whole argument and iterated an
    # undefined name `word`; both fixed.
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 315
| 0
|
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
__UpperCAmelCase : Any = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1_024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
) -> Dict:
    """Run generation for one distributed rank over part of the dataset and
    dump (pred, id) records to ``save_dir/rank_<rank>_output.json``.

    Returns (results, num_replicas).

    NOTE(review): restored from the mangled original (every parameter shared
    one name — a SyntaxError — and locals were read under names never
    assigned); the def name is aligned with the call site in run_generate.
    """
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    if dataset_kwargs is None:
        # robustness: allow the default to be used without a TypeError on **
        dataset_kwargs = {}
    ds = SeqaSeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1_024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    """CLI entry point: run distributed evaluation, then (on rank 0) gather
    the per-rank shards, score them, and write metrics/generations to disk.

    NOTE(review): the original (mangled) definition was named ``a`` although
    the ``__main__`` guard calls ``run_generate()``, and every local was read
    under a name that was never assigned; both restored.
    """
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    # Unknown flags (e.g. --num_beams=2) are forwarded to model.generate.
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    # Only rank 0 aggregates, scores and cleans up.
    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
        else:
            shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
    """Concatenate the per-rank result shards, sort them by example id, and
    return just the predictions in dataset order.

    NOTE(review): restored from the mangled original, which extended
    ``records`` with an undefined name and used a lambda whose body
    referenced a name that was not its parameter; the def name is aligned
    with the call site in run_generate.
    """
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    """Rank-0 helper: wait (up to ``timeout`` seconds) until every rank has
    written its ``rank_*.json`` shard under ``save_dir``, then load them all.

    Raises TimeoutError if the shards never all appear / finish saving.

    NOTE(review): restored from the mangled original (three parameters shared
    one name — a SyntaxError — and the lmap call lost its arguments); the def
    name is aligned with the call site in run_generate.
    """
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
    # Unreachable


if __name__ == "__main__":
    # Usage for MT:
    run_generate()
| 358
|
import math
def is_prime(number: int) -> bool:
    """Return True if *number* is prime (trial division by odd numbers up to
    sqrt(number)).

    NOTE(review): the original def was named ``a`` but its body (and
    next_prime below) reference ``number``/``is_prime``; names restored to
    match those uses.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Return the first prime strictly after ``factor * value``, scanning
    upward (or downward when ``desc=True`` is passed).

    NOTE(review): the original signature repeated one mangled parameter name
    three times (a SyntaxError) and the body read ``value``/``factor``/
    ``first_value_val``, which were never assigned; restored from those uses.
    """
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    # If the starting point was already prime, recurse past it so the result
    # is strictly different from the input.
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
| 315
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : Tuple = logging.get_logger(__name__)
__UpperCAmelCase : int = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : Any = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
"tokenizer_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
},
}
__UpperCAmelCase : Tuple = {
"google/rembert": 256,
}
__UpperCAmelCase : Union[str, Any] = "▁"
class UpperCAmelCase_(PreTrainedTokenizerFast):
    """Fast RemBERT tokenizer backed by HuggingFace's *tokenizers* library.

    Sequences are formatted with ALBERT-style special tokens:
    ``[CLS] A [SEP]`` for a single sequence, ``[CLS] A [SEP] B [SEP]`` for a pair.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # The mask token behaves like a normal word: it absorbs the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # The slow tokenizer can only be re-saved when the sentencepiece model file exists.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap one sequence (or a pair) of token ids with `[CLS]`/`[SEP]`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the sentencepiece model into `save_directory`; return the new path tuple."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 359
|
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
# Module-level logger (unused in this excerpt; kept for parity with upstream).
# NOTE(review): upstream binds this to `logger` — the name here looks like renaming damage.
__UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
class UpperCAmelCase_(ImageGPTImageProcessor):
    """Deprecated alias of :class:`ImageGPTImageProcessor`.

    Kept only for backward compatibility; construction emits a
    ``FutureWarning`` and otherwise behaves exactly like the new class.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 315
| 0
|
class Graph:
    """A directed graph stored as an adjacency dict: vertex -> list of neighbours."""

    def __init__(self):
        self.vertex = {}

    def print_graph(self):
        """Print the raw adjacency dict, then one `u -> v -> w` line per vertex."""
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex, to_vertex):
        """Add a directed edge from `from_vertex` to `to_vertex`."""
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # First outgoing edge for this vertex: create its adjacency list.
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        """Depth-first traversal over all vertices, printing the visit order."""
        # visited is indexed by vertex number; assumes vertices are 0..n-1 — TODO confirm.
        visited = [False] * len(self.vertex)
        # Call the recursive helper from every still-unvisited vertex.
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex, visited):
        """Visit `start_vertex`, then recurse into any still-unvisited vertex."""
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)


# Backward-compatible alias for the original (mechanically renamed) class name.
UpperCAmelCase_ = Graph
if __name__ == "__main__":
    # Demo: build a small directed graph (note the 0->1->2->0 cycle and the
    # self-loop on vertex 3), print its adjacency lists, then run a DFS.
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print("DFS:")
    g.dfs()

    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
| 360
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters used to drive the LayoutLMv3 image-processor tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        """Kwargs dict for constructing the image processor under test."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCAmelCase_(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Unit and integration tests for ``LayoutLMvaImageProcessor``."""

    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        # Batch-feature behaviour is exercised by the mixin; nothing extra here.
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_layoutlmv3_integration_test(self):
        # End-to-end check against a fixture document, including the OCR output.
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']]  # noqa: E231
        expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]]  # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 315
| 0
|
# Sample undirected graph (adjacency lists) used by the demo in ``__main__``.
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}
def bfs_shortest_path(graph: dict, start, goal) -> list:
    """Return the shortest path (as a list of vertices) from `start` to `goal`.

    Performs a breadth-first search over `graph` (an adjacency-list dict).
    Returns ``[]`` when `goal` is unreachable.
    """
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on the shortest path from `start` to `target`.

    Returns ``0`` when `start == target` and ``-1`` when the graph is empty,
    either endpoint is missing, or `target` is unreachable.
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            # Keep the smallest distance seen for the target.
            dist[target] = dist[node] if dist[target] == -1 else min(dist[target], dist[node])
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
    # Demo: run both BFS helpers against the module-level sample graph.
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
| 361
|
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    """Return the ``(features, targets)`` pair from a sklearn-style dataset mapping."""
    return (data["data"], data["target"])
def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    """Fit an XGBoost regressor on (features, target) and predict for `test_features`.

    Returns the predictions reshaped into a column vector of shape (n, 1).
    """
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions
def main() -> None:
    """Fetch the California-housing dataset, train XGBoost, and print error metrics."""
    housing = fetch_california_housing()
    data, target = data_handling(housing)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
    # Run the module doctests verbosely, then train and evaluate the model.
    import doctest
    doctest.testmod(verbose=True)
    main()
| 315
| 0
|
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
# Emit INFO-level transformers logs during the conversion.
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    """Convert a TensorFlow RemBERT checkpoint into a PyTorch ``state_dict`` on disk."""
    # Initialise the PyTorch model from its JSON configuration.
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # CLI entry point: parse the three required paths and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 362
|
# Code cell injected at the top of every auto-generated notebook: installs transformers.
INSTALL_CONTENT = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"

# Cells prepended to converted notebooks.
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
# Placeholder patterns that the `black` formatting step must not touch.
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 315
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Canonical hub checkpoints with downloadable configuration files.
BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
    "Salesforce/blip-vqa-capfit-large": (
        "https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-base": (
        "https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-large": (
        "https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
    ),
    "Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
    "Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
    "Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
    "Salesforce/blip-itm-large-flikr": (
        "https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
    ),
}
class BlipTextConfig(PretrainedConfig):
    """Configuration for the BLIP text model."""

    model_type = "blip_text_model"

    def __init__(
        self,
        vocab_size=30_524,
        hidden_size=768,
        encoder_hidden_size=768,
        intermediate_size=3_072,
        projection_dim=768,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=512,
        hidden_act="gelu",
        layer_norm_eps=1e-12,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        bos_token_id=30_522,
        eos_token_id=2,
        pad_token_id=0,
        sep_token_id=102,
        is_decoder=True,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this text config, unwrapping it from a composite BlipConfig if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors."""
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipVisionConfig(PretrainedConfig):
    """Configuration for the BLIP vision model."""

    model_type = "blip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3_072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        image_size=384,
        patch_size=16,
        hidden_act="gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=1e-10,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this vision config, unwrapping it from a composite BlipConfig if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors."""
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipConfig(PretrainedConfig):
    """Composite configuration holding a BLIP text config and vision config."""

    model_type = "blip"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6_592,
        image_text_hidden_size=256,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")

        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)

        # The text encoder cross-attends over vision features of this width.
        self.text_config.encoder_hidden_size = self.vision_config.hidden_size

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build a BlipConfig from already-constructed text and vision configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested text/vision configs to plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 363
|
import collections
import os
import re
from pathlib import Path
# Root of the package whose __init__ files are checked by the functions below.
# NOTE(review): all of the following constants are bound to the same mangled
# name ``__UpperCAmelCase``; upstream each regex has its own name
# (_re_backend, _re_one_line_import_struct, _re_test_backend, ...) which the
# functions below still reference — confirm against the upstream check script.
__UpperCAmelCase : List[str] = "src/transformers"
# Matches is_xxx_available()
__UpperCAmelCase : int = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
__UpperCAmelCase : Optional[int] = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__UpperCAmelCase : List[Any] = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
__UpperCAmelCase : List[Any] = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
__UpperCAmelCase : str = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__UpperCAmelCase : Union[str, Any] = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
__UpperCAmelCase : Dict = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
__UpperCAmelCase : str = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
__UpperCAmelCase : str = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
__UpperCAmelCase : Any = re.compile(r"^\s*try:")
# Catches a line with else:
__UpperCAmelCase : List[Any] = re.compile(r"^\s*else:")
def a ( SCREAMING_SNAKE_CASE_ : str ):
    """Extract the backend name encoded in an ``if not is_xxx_available()`` line.

    Returns the sorted backend names joined with ``"_and_"`` (e.g.
    ``"tf_and_torch"``), or None when the line is not a backend guard.

    Bug fix: the backend list must be bound to the name that is sorted and
    joined below — the original bound a throwaway local, so ``backends.sort()``
    raised NameError, and it joined the characters of the raw input line
    instead of the backend list.
    """
    if _re_test_backend.search(SCREAMING_SNAKE_CASE_ ) is None:
        return None
    # _re_backend has an empty extra group, so findall yields tuples; take [0].
    backends = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE_ )]
    backends.sort()
    return "_and_".join(backends )
def a ( SCREAMING_SNAKE_CASE_ : Tuple ):
    """Parse a package ``__init__.py`` and return two dicts mapping backend name
    (or ``"none"``) to the objects it registers: one read from the
    ``_import_structure`` half and one from the ``TYPE_CHECKING`` half.

    NOTE(review): nearly every local here is bound to the mangled name
    ``UpperCamelCase`` while the logic reads ``lines``/``line_index``/``line``/
    ``objects``/``backend``/``imports``/``import_dict_objects``/
    ``type_hint_objects`` — none of which are bound under those names as
    written. Presumably artifacts of automated renaming; confirm against the
    upstream ``utils/check_inits.py``.
    """
    with open(SCREAMING_SNAKE_CASE_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        UpperCamelCase : Tuple = f.readlines()
    UpperCamelCase : Tuple = 0
    # Seek to the start of the _import_structure declaration.
    while line_index < len(SCREAMING_SNAKE_CASE_ ) and not lines[line_index].startswith('''_import_structure = {''' ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(SCREAMING_SNAKE_CASE_ ):
        return None
    # First grab the objects without a specific backend in _import_structure
    UpperCamelCase : List[Any] = []
    while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
        UpperCamelCase : Optional[int] = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE_ ):
            UpperCamelCase : Optional[int] = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE_ ).groups()[0]
            UpperCamelCase : str = re.findall(R'''\[([^\]]+)\]''' , SCREAMING_SNAKE_CASE_ )
            for imp in imports:
                # Strip the surrounding quotes from each object name.
                objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
            line_index += 1
            continue
        UpperCamelCase : List[Any] = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE_ )
        if single_line_import_search is not None:
            UpperCamelCase : List[str] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(SCREAMING_SNAKE_CASE_ ) > 0]
            objects.extend(SCREAMING_SNAKE_CASE_ )
        elif line.startswith(''' ''' * 8 + '''"''' ):
            # One quoted object per line at 8-space indent.
            objects.append(line[9:-3] )
        line_index += 1
    UpperCamelCase : Dict = {'''none''': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        UpperCamelCase : Dict = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            UpperCamelCase : Optional[Any] = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            UpperCamelCase : str = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
                UpperCamelCase : str = lines[line_index]
                if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE_ ) is not None:
                    objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE_ ).groups()[0] )
                elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE_ ) is not None:
                    UpperCamelCase : Union[str, Any] = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE_ ).groups()[0].split(''', ''' )
                    UpperCamelCase : List[Any] = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE_ ) > 0]
                    objects.extend(SCREAMING_SNAKE_CASE_ )
                elif _re_between_brackets.search(SCREAMING_SNAKE_CASE_ ) is not None:
                    UpperCamelCase : str = _re_between_brackets.search(SCREAMING_SNAKE_CASE_ ).groups()[0].split(''', ''' )
                    UpperCamelCase : Dict = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE_ ) > 0]
                    objects.extend(SCREAMING_SNAKE_CASE_ )
                elif _re_quote_object.search(SCREAMING_SNAKE_CASE_ ) is not None:
                    objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE_ ).groups()[0] )
                elif line.startswith(''' ''' * 8 + '''"''' ):
                    objects.append(line[9:-3] )
                elif line.startswith(''' ''' * 1_2 + '''"''' ):
                    objects.append(line[1_3:-3] )
                line_index += 1
            UpperCamelCase : Tuple = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    UpperCamelCase : int = []
    while (
        line_index < len(SCREAMING_SNAKE_CASE_ )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith('''else''' )
    ):
        UpperCamelCase : Tuple = lines[line_index]
        UpperCamelCase : Any = _re_import.search(SCREAMING_SNAKE_CASE_ )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
        elif line.startswith(''' ''' * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    UpperCamelCase : Any = {'''none''': objects}
    # Let's continue with backend-specific objects
    while line_index < len(SCREAMING_SNAKE_CASE_ ):
        # If the line is an if is_backend_available, we grab all objects associated.
        UpperCamelCase : Optional[Any] = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            UpperCamelCase : Dict = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            UpperCamelCase : Dict = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
                UpperCamelCase : Optional[Any] = lines[line_index]
                UpperCamelCase : str = _re_import.search(SCREAMING_SNAKE_CASE_ )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
                elif line.startswith(''' ''' * 1_2 ):
                    objects.append(line[1_2:-2] )
                line_index += 1
            UpperCamelCase : str = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def a ( import_dict_objects : dict , type_hint_objects : dict ):
    """Compare the two halves of an ``__init__`` (import structure vs TYPE_CHECKING).

    Returns a list of human-readable error strings; empty when the halves agree.

    Bug fixes: the original declared two parameters with the same mangled name
    (a SyntaxError) while the body read ``import_dict_objects`` /
    ``type_hint_objects``, and the ``errors`` accumulator was bound to a
    throwaway local before being appended to and returned.
    """
    def find_duplicates(seq ):
        # Keys that appear more than once on one side.
        return [k for k, v in collections.Counter(seq ).items() if v > 1]

    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        UpperCamelCase = find_duplicates(import_dict_objects[key] )
        duplicate_imports = UpperCamelCase
        if duplicate_imports:
            errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = '''base imports''' if key == '''none''' else F"""{key} backend"""
            errors.append(F"""Differences for {name}:""" )
            for obj in type_hint_objects[key]:
                if obj not in import_dict_objects[key]:
                    errors.append(F""" {obj} in TYPE_HINT but not in _import_structure.""" )
            for obj in import_dict_objects[key]:
                if obj not in type_hint_objects[key]:
                    errors.append(F""" {obj} in _import_structure but not in TYPE_HINT.""" )
    return errors
def a ( ):
    """Walk the source tree; for every package ``__init__.py``, compare the
    ``_import_structure`` half against the TYPE_CHECKING half and raise a
    ValueError report when any of them disagree.

    NOTE(review): several names here are unresolved as written — the walk root
    ``SCREAMING_SNAKE_CASE_``, the helpers ``parse_init``/``analyze_results``
    (both defined above under the mangled name ``a``), and the locals
    ``objects``/``errors``/``failures``/``fname`` that are bound to
    ``UpperCamelCase`` instead. Presumably renaming artifacts; confirm upstream.
    """
    UpperCamelCase : Any = []
    for root, _, files in os.walk(SCREAMING_SNAKE_CASE_ ):
        if "__init__.py" in files:
            UpperCamelCase : int = os.path.join(SCREAMING_SNAKE_CASE_ , '''__init__.py''' )
            UpperCamelCase : Optional[int] = parse_init(SCREAMING_SNAKE_CASE_ )
            if objects is not None:
                UpperCamelCase : str = analyze_results(*SCREAMING_SNAKE_CASE_ )
                if len(SCREAMING_SNAKE_CASE_ ) > 0:
                    UpperCamelCase : List[Any] = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
                    failures.append('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
    if len(SCREAMING_SNAKE_CASE_ ) > 0:
        raise ValueError('''\n\n'''.join(SCREAMING_SNAKE_CASE_ ) )
def a ( ):
    """Collect the dotted names of the first-level submodules under the package
    root (directories with Python files plus top-level ``.py`` modules),
    skipping private ``_``-prefixed folders and empty leftover directories.

    NOTE(review): the walk root and several locals are unresolved mangled names
    (``SCREAMING_SNAKE_CASE_``, and ``submodules``/``short_path``/``submodule``
    read but only ever bound as ``UpperCamelCase``); confirm upstream.
    """
    UpperCamelCase : Dict = []
    for path, directories, files in os.walk(SCREAMING_SNAKE_CASE_ ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('''_''' ):
                directories.remove(SCREAMING_SNAKE_CASE_ )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(SCREAMING_SNAKE_CASE_ ) / folder).glob('''*.py''' ) ) ) == 0:
                continue
            UpperCamelCase : List[str] = str((Path(SCREAMING_SNAKE_CASE_ ) / folder).relative_to(SCREAMING_SNAKE_CASE_ ) )
            UpperCamelCase : str = short_path.replace(os.path.sep , '''.''' )
            submodules.append(SCREAMING_SNAKE_CASE_ )
        for fname in files:
            if fname == "__init__.py":
                continue
            UpperCamelCase : Tuple = str((Path(SCREAMING_SNAKE_CASE_ ) / fname).relative_to(SCREAMING_SNAKE_CASE_ ) )
            UpperCamelCase : int = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
            # Only keep top-level modules (no dots after stripping the extension).
            if len(submodule.split('''.''' ) ) == 1:
                submodules.append(SCREAMING_SNAKE_CASE_ )
    return submodules
# Submodules that are intentionally not registered in the main init.
# NOTE(review): bound to the mangled name ``__UpperCAmelCase`` although the
# checker below reads it as ``IGNORE_SUBMODULES``; confirm upstream naming.
__UpperCAmelCase : Optional[int] = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def a ( ):
    """Verify that every package submodule appears as a key of the main
    ``_import_structure`` (directly or via re-added optional-dependency keys),
    raising a ValueError listing any unregistered submodules.

    NOTE(review): relies on names that are unresolved as written —
    ``SCREAMING_SNAKE_CASE_`` as the package root, ``get_transformers_submodules``
    (defined above under the mangled name ``a``), ``IGNORE_SUBMODULES``,
    ``import_structure_keys`` and ``module_not_registered`` (bound only as
    ``UpperCamelCase``); confirm against the upstream checker.
    """
    from transformers.utils import direct_transformers_import

    UpperCamelCase : Tuple = direct_transformers_import(SCREAMING_SNAKE_CASE_ )
    UpperCamelCase : str = set(transformers._import_structure.keys() )
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentiall re-) add them.
    with open(os.path.join(SCREAMING_SNAKE_CASE_ , '''__init__.py''' ) , '''r''' ) as f:
        UpperCamelCase : List[Any] = f.read()
    import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , SCREAMING_SNAKE_CASE_ ) ) )
    UpperCamelCase : Union[str, Any] = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(SCREAMING_SNAKE_CASE_ ) > 0:
        UpperCamelCase : str = '''\n'''.join(F"""- {module}""" for module in module_not_registered )
        raise ValueError(
            '''The following submodules are not properly registed in the main init of Transformers:\n'''
            F"""{list_of_modules}\n"""
            '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
    # NOTE(review): both checkers above are defined under the mangled name `a`,
    # so these calls are unresolved as written — confirm upstream entry points.
    check_all_inits()
    check_submodules()
| 315
| 0
|
def a ( SCREAMING_SNAKE_CASE_ : int = 5_0 ) -> int:
    """Count the ways to fill a row of ``SCREAMING_SNAKE_CASE_`` units with
    blocks of length >= 3 separated by at least one empty unit, including the
    all-empty row (Project Euler 114).

    Bug fix: the table and the row length were read under the names ``length``
    and ``ways_number``, which the mangled original never bound — bind them
    explicitly so the dynamic programme actually runs.
    """
    length = SCREAMING_SNAKE_CASE_
    # ways_number[i] = number of arrangements for a row of length i (empty row counts).
    ways_number = [1] * (length + 1)
    for row_length in range(3 , length + 1 ):
        for block_length in range(3 , row_length + 1 ):
            for block_start in range(row_length - block_length ):
                # Place the first block at block_start; the remainder (after a
                # mandatory gap) contributes independently.
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            # The block can also sit flush against the end of the row.
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
    # NOTE(review): the solver above is defined under the mangled name `a`,
    # so `solution` is unresolved as written — confirm upstream naming.
    print(f'''{solution() = }''')
| 364
|
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    """Return True when the input contains every letter a-z at least once.

    Bug fix: the set of seen letters and the whitespace-stripped input were
    bound to a throwaway mangled local, so the loop raised NameError and the
    final comparison measured the raw input's length instead of the number of
    distinct letters.
    """
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = SCREAMING_SNAKE_CASE_.replace(''' ''' , '''''' )
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower() )
    return len(frequency ) == 2_6
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    """Return True when the input contains every letter a-z at least once,
    using a 26-entry boolean flag table.

    Bug fix: each seen letter must set its slot in the flag table — the
    original bound a throwaway mangled local instead and then truth-tested the
    raw characters, returning True for any non-empty string.
    """
    flags = [False] * 2_6
    for char in SCREAMING_SNAKE_CASE_:
        if char.islower():
            flags[ord(char ) - ord('''a''' )] = True
        elif char.isupper():
            flags[ord(char ) - ord('''A''' )] = True
    return all(flags )
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    """Return True when the lowercased input yields 26 distinct alphabetic
    characters (set-comprehension variant of the pangram check).

    Bug fix: the comprehension must read the function's parameter; the
    original referenced the undefined name ``input_str`` and raised NameError.
    """
    return len({char for char in SCREAMING_SNAKE_CASE_.lower() if char.isalpha()} ) == 2_6
def a ( ):
    """Benchmark the three pangram checks with timeit and print the timings.

    NOTE(review): the setup string imports ``is_pangram`` /
    ``is_pangram_faster`` / ``is_pangram_fastest`` from ``__main__``, but the
    functions above are all defined under the mangled name ``a`` — the import
    would fail as written, and ``SCREAMING_SNAKE_CASE_`` used as the setup
    argument is unbound; confirm against the upstream script.
    """
    from timeit import timeit

    UpperCamelCase : int = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
    print(timeit('''is_pangram()''' , setup=SCREAMING_SNAKE_CASE_ ) )
    print(timeit('''is_pangram_faster()''' , setup=SCREAMING_SNAKE_CASE_ ) )
    print(timeit('''is_pangram_fastest()''' , setup=SCREAMING_SNAKE_CASE_ ) )
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): the benchmark above is defined under the mangled name `a`,
    # so `benchmark` is unresolved as written — confirm upstream naming.
    benchmark()
| 315
| 0
|
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class UpperCAmelCase_ :
    """Helper that builds a TrOCR decoder config plus dummy inputs for the test
    suite below.

    NOTE(review): most statements bind the mangled local ``UpperCamelCase``
    instead of ``self.<attr>``, the ``__init__`` signature repeats the same
    mangled parameter name (a SyntaxError as written), and several names
    (``parent``, ``batch_size``, ``config_and_inputs``, ``input_ids``, ...) are
    read without being bound — artifacts of automated renaming; confirm against
    the upstream ``TrOCRStandaloneDecoderModelTester``.
    """

    def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=None , ):
        """Record the hyper-parameters used to build configs and dummy inputs."""
        UpperCamelCase : List[Any] = parent
        UpperCamelCase : List[str] = batch_size
        UpperCamelCase : str = decoder_seq_length
        # For common tests
        UpperCamelCase : Optional[int] = self.decoder_seq_length
        UpperCamelCase : Optional[int] = is_training
        UpperCamelCase : List[str] = use_attention_mask
        UpperCamelCase : List[Any] = use_labels
        UpperCamelCase : Dict = vocab_size
        UpperCamelCase : int = d_model
        UpperCamelCase : List[str] = d_model
        UpperCamelCase : List[str] = decoder_layers
        UpperCamelCase : str = decoder_layers
        UpperCamelCase : Dict = decoder_ffn_dim
        UpperCamelCase : Any = decoder_attention_heads
        UpperCamelCase : int = decoder_attention_heads
        UpperCamelCase : Dict = eos_token_id
        UpperCamelCase : Any = bos_token_id
        UpperCamelCase : List[str] = pad_token_id
        UpperCamelCase : Optional[int] = decoder_start_token_id
        UpperCamelCase : Dict = use_cache
        UpperCamelCase : str = max_position_embeddings
        UpperCamelCase : str = None
        UpperCamelCase : Optional[Any] = decoder_seq_length
        UpperCamelCase : List[str] = 2
        UpperCamelCase : List[str] = 1

    def _lowercase ( self ):
        """Build (config, input_ids, attention_mask, lm_labels) dummy tensors."""
        UpperCamelCase : int = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        UpperCamelCase : Optional[Any] = None
        if self.use_attention_mask:
            UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
        UpperCamelCase : str = None
        if self.use_labels:
            UpperCamelCase : Any = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        UpperCamelCase : Optional[int] = TrOCRConfig(
            vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
        return (config, input_ids, attention_mask, lm_labels)

    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ):
        """Check that cached decoding (past_key_values) matches a full re-computation."""
        UpperCamelCase : Union[str, Any] = True
        UpperCamelCase : Tuple = TrOCRDecoder(config=__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE ).eval()
        UpperCamelCase : Dict = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        UpperCamelCase : List[str] = model(__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )
        UpperCamelCase : List[str] = model(__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Any = model(__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )
        self.parent.assertTrue(len(__SCREAMING_SNAKE_CASE ) == len(__SCREAMING_SNAKE_CASE ) )
        self.parent.assertTrue(len(__SCREAMING_SNAKE_CASE ) == len(__SCREAMING_SNAKE_CASE ) + 1 )
        UpperCamelCase : Tuple = outputs['''past_key_values''']
        # create hypothetical next token and extent to next_input_ids
        UpperCamelCase : Optional[Any] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append to next input_ids and
        UpperCamelCase : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
        UpperCamelCase : List[Any] = model(__SCREAMING_SNAKE_CASE )['''last_hidden_state''']
        UpperCamelCase : Optional[int] = model(__SCREAMING_SNAKE_CASE , past_key_values=__SCREAMING_SNAKE_CASE )['''last_hidden_state''']
        # select random slice
        UpperCamelCase : Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        UpperCamelCase : Optional[Any] = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        UpperCamelCase : Any = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 )

    def _lowercase ( self ):
        """Bundle config and inputs into the dict shape the common tests expect."""
        UpperCamelCase : Dict = self.prepare_config_and_inputs()
        UpperCamelCase : int = config_and_inputs
        UpperCamelCase : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a, _a, _a, unittest.TestCase):
    """Model-test suite wiring for the standalone TrOCR decoder.

    NOTE(review): inherits from the mangled, unresolved name ``_a`` (upstream:
    the common tester mixins) and ``setUp`` instantiates
    ``TrOCRStandaloneDecoderModelTester``, which is not defined under that name
    in this mangled file — confirm against the upstream test module.
    """
    # Mangled class attributes (upstream: all_model_classes, all_generative_model_classes,
    # pipeline_model_mapping, test_pruning, test_resize_embeddings — all collapsed
    # onto the same mangled name here).
    __UpperCamelCase : Optional[Any] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    __UpperCamelCase : List[Any] = (TrOCRForCausalLM,) if is_torch_available() else ()
    __UpperCamelCase : int = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    __UpperCamelCase : Optional[int] = True
    __UpperCamelCase : Optional[Any] = False

    def _lowercase ( self ):
        """Set up the model tester and config tester used by the cases below."""
        UpperCamelCase : Tuple = TrOCRStandaloneDecoderModelTester(self , is_training=__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Any = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE )

    def _lowercase ( self ):
        """Intentionally skipped common test (no-op)."""
        pass

    def _lowercase ( self ):
        """Intentionally skipped common test (no-op)."""
        pass

    def _lowercase ( self ):
        """Intentionally skipped common test (no-op)."""
        pass

    def _lowercase ( self ):
        """Run the shared configuration sanity checks."""
        self.config_tester.run_common_tests()

    def _lowercase ( self ):
        """Exercise cached decoding against full re-computation."""
        UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*__SCREAMING_SNAKE_CASE )

    def _lowercase ( self ):
        """Decoder cannot keep hidden states or attentions — nothing to test."""
        return

    @unittest.skip('''The model doesn\'t support left padding''' )  # and it's not used enough to be worth fixing :)
    def _lowercase ( self ):
        """Skipped: left padding unsupported by this model."""
        pass
| 365
|
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
__UpperCAmelCase : Union[str, Any] = logging.getLogger()
def a ( ):
    """Return the value passed to the ``-f`` CLI flag (pytest compatibility shim).

    Bug fix: bind the parser and the parsed namespace to the names that are
    actually used — the original bound throwaway mangled locals and then
    referenced the undefined names ``parser`` and ``args``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('''-f''' )
    args = parser.parse_args()
    return args.f
class UpperCAmelCase_ ( _a):
    """End-to-end tests for the DeeBERT example script (two-stage training on
    the MRPC fixture data, then two early-exit evaluation configurations).

    NOTE(review): inherits from the mangled, unresolved name ``_a`` (upstream:
    ``TestCasePlus``), and ``run_and_check`` reads names (``n_gpu``, ``args``,
    ``result``, ``value``) that the mangled assignments never bind — confirm
    against the upstream test module.
    """

    def _lowercase ( self ):
        """Route the example script's logging to stdout so pytest captures it."""
        UpperCamelCase : List[str] = logging.StreamHandler(sys.stdout )
        logger.addHandler(__SCREAMING_SNAKE_CASE )

    def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
        """Run run_glue_deebert.main() with the given argv and require every
        reported metric to be >= 0.666 (multi-GPU runs are currently skipped)."""
        UpperCamelCase : Dict = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0 , '''run_glue_deebert.py''' )
            with patch.object(__SCREAMING_SNAKE_CASE , '''argv''' , __SCREAMING_SNAKE_CASE ):
                UpperCamelCase : int = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(__SCREAMING_SNAKE_CASE , 0.666 )

    @slow
    @require_torch_non_multi_gpu
    def _lowercase ( self ):
        """Two-stage train on MRPC fixtures, then evaluate each highway layer and
        an entropy-thresholded early-exit configuration."""
        UpperCamelCase : Any = '''
            --model_type roberta
            --model_name_or_path roberta-base
            --task_name MRPC
            --do_train
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --max_seq_length 128
            --per_gpu_eval_batch_size=1
            --per_gpu_train_batch_size=8
            --learning_rate 2e-4
            --num_train_epochs 3
            --overwrite_output_dir
            --seed 42
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --save_steps 0
            --overwrite_cache
            --eval_after_first_stage
            '''.split()
        self.run_and_check(__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Dict = '''
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --eval_each_highway
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            '''.split()
        self.run_and_check(__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Union[str, Any] = '''
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --early_exit_entropy 0.1
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            '''.split()
        self.run_and_check(__SCREAMING_SNAKE_CASE )
| 315
| 0
|
def a ( num_a : int , num_b : int ) -> int:
    """Return gcd(num_a, num_b) via the iterative Euclidean algorithm.

    Bug fixes: the original declared two parameters with the same mangled name
    (a SyntaxError) and bound the swap result to a throwaway local, leaving the
    loop variables unchanged — an infinite loop for any non-zero second value.
    """
    while num_b:
        num_a , num_b = num_b , num_a % num_b
    return num_a
def a ( num_a : int , num_b : int ) -> int:
    """Return gcd(num_a, num_b) via the recursive Euclidean algorithm.

    Bug fixes: the original declared duplicate parameter names (a SyntaxError)
    and recursed through ``euclidean_gcd_recursive``, a name that does not
    exist in this file — recurse through the function itself instead.
    """
    return num_a if num_b == 0 else a(num_b , num_a % num_b )
def a ( ):
    """Print demo outputs of both gcd helpers.

    NOTE(review): references ``euclidean_gcd`` / ``euclidean_gcd_recursive``,
    but both helpers above are defined under the mangled name ``a`` — the
    calls are unresolved as written; confirm against the upstream script.
    """
    print(F"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
    print(F"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
    print(F"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
    print(F"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
    print(F"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
    print(F"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
    print(F"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
    print(F"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
    print(F"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
    print(F"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )


if __name__ == "__main__":
    # NOTE(review): the demo entry point above is also defined under the
    # mangled name `a`, so `main` is unresolved as written.
    main()
| 366
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger and the map of canonical I-BERT checkpoints to their hosted
# config URLs.
# NOTE(review): both assignments bind the same mangled name ``__UpperCAmelCase``,
# so the logger handle is overwritten by the archive map as written.
__UpperCAmelCase : Tuple = logging.get_logger(__name__)
__UpperCAmelCase : Union[str, Any] = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}
class UpperCAmelCase_ ( _a):
    """Configuration for the I-BERT (integer-quantized BERT) model.

    NOTE(review): every assignment in ``__init__`` binds the mangled local
    ``UpperCamelCase`` instead of ``self.<attr>``, and the signature repeats
    one mangled parameter name for every argument (a SyntaxError as written);
    the intended attributes can be read off the right-hand sides. Confirm
    against the upstream ``IBertConfig``.
    """

    # Mangled class attribute — upstream `model_type = "ibert"`.
    __UpperCamelCase : List[Any] = "ibert"

    def __init__( self , __SCREAMING_SNAKE_CASE=30_522 , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3_072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE="absolute" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="none" , **__SCREAMING_SNAKE_CASE , ):
        """Forward special-token ids to the base class and record hyper-parameters."""
        super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Union[str, Any] = vocab_size
        UpperCamelCase : Optional[int] = hidden_size
        UpperCamelCase : Tuple = num_hidden_layers
        UpperCamelCase : Optional[Any] = num_attention_heads
        UpperCamelCase : Dict = hidden_act
        UpperCamelCase : Union[str, Any] = intermediate_size
        UpperCamelCase : str = hidden_dropout_prob
        UpperCamelCase : Any = attention_probs_dropout_prob
        UpperCamelCase : Dict = max_position_embeddings
        UpperCamelCase : Union[str, Any] = type_vocab_size
        UpperCamelCase : Optional[Any] = initializer_range
        UpperCamelCase : Union[str, Any] = layer_norm_eps
        UpperCamelCase : Dict = position_embedding_type
        UpperCamelCase : int = quant_mode
        UpperCamelCase : Any = force_dequant
class UpperCAmelCase_ ( _a):
    """ONNX export configuration for the I-BERT model: declares the dynamic
    axes of the exported graph's inputs."""

    @property
    def _lowercase ( self ):
        """Return an ordered mapping from input name to its dynamic-axis labels.

        Bug fix: the axis dictionary was bound to a throwaway mangled local
        while the returned OrderedDict read the undefined name
        ``dynamic_axis`` — a guaranteed NameError; bind it properly.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
| 315
| 0
|
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCAmelCase : int = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class UpperCAmelCase_ ( _a):
    """Configuration for the Perceiver model (latent array, cross/self attention
    layout, plus per-task attributes for MLM, image classification, optical
    flow and multimodal autoencoding).

    NOTE(review): every assignment in ``__init__`` binds the mangled local
    ``UpperCamelCase`` instead of ``self.<attr>``, and the signature repeats
    one mangled parameter name for every argument (a SyntaxError as written);
    the intended attributes can be read off the right-hand sides. Confirm
    against the upstream ``PerceiverConfig``.
    """

    # Mangled class attribute — upstream `model_type = "perceiver"`.
    __UpperCamelCase : List[str] = "perceiver"

    def __init__( self , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=1_280 , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=26 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="kv" , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=262 , __SCREAMING_SNAKE_CASE=2_048 , __SCREAMING_SNAKE_CASE=56 , __SCREAMING_SNAKE_CASE=[368, 496] , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=1_920 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=[1, 16, 224, 224] , **__SCREAMING_SNAKE_CASE , ):
        """Record architecture and per-task hyper-parameters."""
        super().__init__(**__SCREAMING_SNAKE_CASE )
        UpperCamelCase : str = num_latents
        UpperCamelCase : int = d_latents
        UpperCamelCase : Tuple = d_model
        UpperCamelCase : Dict = num_blocks
        UpperCamelCase : Optional[int] = num_self_attends_per_block
        UpperCamelCase : Any = num_self_attention_heads
        UpperCamelCase : Dict = num_cross_attention_heads
        UpperCamelCase : List[Any] = qk_channels
        UpperCamelCase : Optional[Any] = v_channels
        UpperCamelCase : Any = cross_attention_shape_for_attention
        UpperCamelCase : List[str] = self_attention_widening_factor
        UpperCamelCase : Union[str, Any] = cross_attention_widening_factor
        UpperCamelCase : Optional[Any] = hidden_act
        UpperCamelCase : Tuple = attention_probs_dropout_prob
        UpperCamelCase : Any = initializer_range
        UpperCamelCase : List[Any] = layer_norm_eps
        UpperCamelCase : Union[str, Any] = use_query_residual
        # masked language modeling attributes
        UpperCamelCase : List[Any] = vocab_size
        UpperCamelCase : Optional[int] = max_position_embeddings
        # image classification attributes
        UpperCamelCase : Optional[int] = image_size
        # flow attributes
        UpperCamelCase : List[Any] = train_size
        # multimodal autoencoding attributes
        UpperCamelCase : Optional[int] = num_frames
        UpperCamelCase : int = audio_samples_per_frame
        UpperCamelCase : List[str] = samples_per_patch
        UpperCamelCase : Any = output_shape
class UpperCAmelCase_ ( _a):
    """ONNX export configuration for Perceiver: dynamic axes, validation
    tolerance, and dummy-input generation for text or image preprocessors.

    NOTE(review): several locals in ``generate_dummy_inputs`` are bound to the
    mangled name ``UpperCamelCase`` while the logic reads ``inputs`` /
    ``dynamic_axis`` — unresolved as written; confirm against the upstream
    ``PerceiverOnnxConfig``.
    """

    @property
    def _lowercase ( self ):
        """Return the ordered mapping from input name to its dynamic-axis labels."""
        if self.task == "multiple-choice":
            UpperCamelCase : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            UpperCamelCase : List[Any] = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''inputs''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )

    @property
    def _lowercase ( self ):
        """Absolute tolerance used when validating the exported model's outputs."""
        return 1e-4

    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 3 , __SCREAMING_SNAKE_CASE = 40 , __SCREAMING_SNAKE_CASE = 40 , ):
        """Generate dummy export inputs from either a tokenizer or an image
        preprocessor, renaming the primary tensor key to ``inputs``."""
        if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            UpperCamelCase : Tuple = compute_effective_axis_dimension(
                __SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            UpperCamelCase : Optional[Any] = preprocessor.num_special_tokens_to_add(__SCREAMING_SNAKE_CASE )
            UpperCamelCase : Optional[Any] = compute_effective_axis_dimension(
                __SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__SCREAMING_SNAKE_CASE )
            # Generate dummy inputs according to compute batch and sequence
            UpperCamelCase : Dict = [''' '''.join(['''a'''] ) * seq_length] * batch_size
            UpperCamelCase : int = dict(preprocessor(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE ) )
            UpperCamelCase : Any = inputs.pop('''input_ids''' )
            return inputs
        elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            UpperCamelCase : str = compute_effective_axis_dimension(__SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_batch )
            UpperCamelCase : Tuple = self._generate_dummy_images(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            UpperCamelCase : str = dict(preprocessor(images=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE ) )
            UpperCamelCase : Optional[int] = inputs.pop('''pixel_values''' )
            return inputs
        else:
            raise ValueError(
                '''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
| 367
|
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
# Module-level logger handle from the transformers logging utility.
# NOTE(review): the obfuscation pass bound this to a throwaway name; nothing
# in this chunk reads it back, but conventionally it should be `logger`.
__UpperCAmelCase : int = logging.get_logger(__name__)
class UpperCAmelCase_(_a):
    """HTML feature extractor: parses HTML strings into their text nodes and
    the XPath expression of each node (MarkupLM-style preprocessing).

    Requires the `bs4` (BeautifulSoup) backend.

    Fixes vs. original: locals were read without being bound (`xpath_tags`,
    `child`, `nodes`, ...), the sibling search passed an element where
    `recursive=False` belongs, the NavigableString check tested the raw HTML
    string instead of the current element, and the helper methods were all
    renamed `_lowercase` while call sites still used the real names.
    """

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        """Return (tag_names, subscripts) describing `element`'s path from the root.

        A subscript of 0 means the tag is its parent's only child of that name;
        otherwise it is the 1-based position among same-named siblings.
        """
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            # Only direct children count when numbering same-named siblings.
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child))
            child = parent
        # Collected bottom-up; reverse to get root-to-leaf order.
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        """Extract (doc_strings, xpath_tag_seqs, xpath_subscript_seqs) from one HTML string."""
        html_code = BeautifulSoup(html_string, "html.parser")
        all_doc_strings = []
        stringaxtag_seq = []
        stringaxsubs_seq = []
        for element in html_code.descendants:
            if type(element) == bsa.element.NavigableString:
                if type(element.parent) != bsa.element.Tag:
                    continue
                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    # Skip whitespace-only text nodes.
                    continue
                all_doc_strings.append(text_in_this_tag)
                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                stringaxtag_seq.append(xpath_tags)
                stringaxsubs_seq.append(xpath_subscripts)
        if len(all_doc_strings) != len(stringaxtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(stringaxsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")
        return all_doc_strings, stringaxtag_seq, stringaxsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        """Render tag/subscript sequences as an XPath string, e.g. '/html/body/div[2]'."""
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings):
        """Featurize one HTML string or a batch of them into nodes + xpaths.

        Returns a BatchFeature with keys "nodes" and "xpaths".
        """
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True
        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}.")
        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]
        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, stringaxtag_seq, stringaxsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, stringaxtag_seq, stringaxsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)
        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        # No tensor conversion for string features.
        encoded_inputs = BatchFeature(data=data, tensor_type=None)
        return encoded_inputs
| 315
| 0
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Restored the conventional names: the original bound both statements to the
# same throwaway name, leaving `logger` (used by the config class below)
# undefined.
logger = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL.
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class UpperCAmelCase_(_a):
    """Configuration for XLNet models.

    Fixes vs. original: the ``__init__`` signature reused one parameter name 27
    times (a SyntaxError), every ``self.<attr> = ...`` assignment had lost its
    target, the class attributes expected by PretrainedConfig
    (``model_type``/``keys_to_ignore_at_inference``/``attribute_map``) were
    bound to a throwaway name, and the ``max_position_embeddings`` setter
    decorator referenced a property that no longer existed.
    """

    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32_000,
        d_model=1_024,
        n_layer=24,
        n_head=16,
        d_inner=4_096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """Store model hyper-parameters; raises ValueError when d_model is not divisible by n_head."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            # An explicit d_head must agree with the derived value.
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})")
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            # Legacy alias for use_mems_eval.
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        """XLNet has no fixed sequence-length limit; always reports -1."""
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Setter exists only to fail loudly: the limit cannot be changed.
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit.")
| 368
|
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
# Restored names: the functions below read `logger` and `DEFAULT_DEVICE`, but
# the original bound both statements to throwaway names.
logger = getLogger(__name__)

# Prefer GPU when available; individual runs can override via --device.
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(examples, out_file, model_name, batch_size=8, device=DEFAULT_DEVICE, fpaa=False, task="summarization", prefix=None, **generate_kwargs):
    """Run a seq2seq model over `examples`, writing one generated line per input to `out_file`.

    Returns a dict with n_obs, runtime (seconds) and seconds_per_sample.

    Fixes vs. original: the function was named `a` (the call site uses this
    name), the signature reused one parameter name for every argument
    (a SyntaxError), and locals (`fout`, `model`, `tokenizer`, ...) were read
    without ever being bound.
    """
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name).to(device)
    if fpaa:
        # Half precision halves memory; only meaningful on GPU.
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids, attention_mask=batch.attention_mask, **generate_kwargs)
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    """Current local time as 'YYYY-MM-DD HH:MM:SS' (default value for --info).

    Fix vs. original: the function was named `a`, but the argparse setup below
    calls it as `datetime_now`.
    """
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    """CLI entry point: generate model outputs for an input file and optionally
    score them against a reference file.

    Returns the scores dict ({} when no --reference_path is given).

    Fixes vs. original: the function was named `a` (the __main__ guard calls
    `run_generate`), argparse creates `args.fp16` (the original read the
    non-existent `args.fpaa`), and locals (`parser`, `args`, `parsed_args`,
    `examples`, `scores`, ...) were read without ever being bound.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples")
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all.")
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    # T5 checkpoints expect a leading space before each source line.
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fpaa=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores


if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
| 315
| 0
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__UpperCAmelCase : Optional[int] = ""
__UpperCAmelCase : Union[str, Any] = ""
__UpperCAmelCase : Optional[int] = ""
__UpperCAmelCase : Any = 1 # (0 is vertical, 1 is horizontal)
def main():
    """Flip every dataset image (and its YOLO boxes) and write the results to OUTPUT_DIR.

    Fixes vs. original: the script's functions were all named `a` while this
    body calls them by their real names; locals were read without being bound;
    and the output paths carried a stray leading '/' that redirected every
    write to the filesystem root instead of OUTPUT_DIR.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)
    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cva.imwrite(f"{file_root}.jpg", image, [cva.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir, img_dir):
    """Read YOLO label files from `label_dir` and pair them with image paths in `img_dir`.

    Returns (img_paths, labels) where labels[i] is a list of
    [class, x_center, y_center, width, height] boxes. Label files with no
    boxes are skipped.

    Fixes vs. original: the function was named `a` with duplicate parameter
    names (a SyntaxError), locals were read without being bound, and the image
    path appended was the function argument rather than the computed path.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list, anno_list, flip_type=1):
    """Flip each image along `flip_type` (0 vertical, 1 horizontal) and mirror
    its bounding boxes accordingly.

    Returns (new_imgs_list, new_annos_lists, path_list).

    Fixes vs. original: the function was named `a` with duplicate parameter
    names (a SyntaxError) and every local was read without being bound.
    """
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cva.imread(path)
        if flip_type == 1:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                # Horizontal flip mirrors the x center.
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                # Vertical flip mirrors the y center.
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char=32):
    """Return a random string of `number_char` lowercase letters and digits.

    Fix vs. original: the function was named `a` (main() calls `random_chars`)
    and the alphabet local was never bound before use.
    """
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
| 369
|
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class UpperCAmelCase_(_a):
    """Processor pairing an image processor with a tokenizer (Donut-style),
    including ``tokenajson`` for decoding <s_key>...</s_key> token sequences
    back into nested dicts.

    Fixes vs. original: duplicate parameter names (SyntaxErrors), locals read
    without being bound, the ProcessorMixin class attributes bound to a
    throwaway name, and every method renamed `_lowercase` — which both made the
    later definitions overwrite the earlier ones and broke the recursive
    `self.tokenajson(...)` calls.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # Legacy alias kept for backward compatibility.
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Route inputs: images to the image processor, text to the tokenizer.

        When both are given, tokenized text is attached as "labels".
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")
        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Temporarily route __call__ straight to the tokenizer (deprecated)."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call.")
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def tokenajson(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a <s_key>...</s_key> token string into a nested dict/list.

        Leaf values are split on <sep/>; categorical special tokens of the form
        <name/> present in the added vocabulary are unwrapped to `name`.
        """
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()
        output = {}
        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                # Unterminated key: drop the opening tag and keep scanning.
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.tokenajson(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]
                tokens = tokens[tokens.find(end_token) + len(end_token):].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:], is_inner_value=True, added_vocab=added_vocab)
        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        """Deprecated alias for image_processor_class."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias for image_processor."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 315
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy import structure for the ConvNeXT model family.
# Fixes vs. original: every `_import_structure` assignment (and the final
# `sys.modules` replacement) was bound to a throwaway name, so the lazy module
# exported nothing; also added `module_spec=__spec__` for consistency with the
# other lazy-module blocks in this file.
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 370
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure for the MGP-STR model.
# Fixes vs. original: the `_import_structure` dict, the torch-only entry and
# the final `sys.modules` replacement were all bound to throwaway names.
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 315
| 0
|
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
# Restored the `logger` name: the pipeline's __init__ calls logger.warning,
# but the original bound this to a throwaway name.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class UpperCAmelCase_ ( _a):
'''simple docstring'''
def __init__(self, segmentation_model, segmentation_processor, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor):
    """Register the text-guided inpainting pipeline components, patching
    outdated scheduler configs (steps_offset / skip_prk_steps) with a
    deprecation notice.

    Fixes vs. original: the signature reused one name for all nine parameters
    (a SyntaxError), and the deprecation messages / patched config dicts were
    built but bound to throwaway names instead of being used.
    """
    super().__init__()
    if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
        deprecation_message = (
            f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
            f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
            "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
            " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
            " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
            " file"
        )
        deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
        new_config = dict(scheduler.config)
        new_config["steps_offset"] = 1
        # NOTE(review): writing back via _internal_dict follows the diffusers
        # config-patching pattern -- confirm against the diffusers version in use.
        scheduler._internal_dict = FrozenDict(new_config)
    if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
        deprecation_message = (
            f"The configuration file of this scheduler: {scheduler} has not set the configuration"
            " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
            " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
            " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
            " Hub, it would be very nice if you could open a Pull request for the"
            " `scheduler/scheduler_config.json` file"
        )
        deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
        new_config = dict(scheduler.config)
        new_config["skip_prk_steps"] = True
        scheduler._internal_dict = FrozenDict(new_config)
    if safety_checker is None:
        logger.warning(
            f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
            " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
            " results in services or applications open to the public. Both the diffusers team and Hugging Face"
            " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
            " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
            " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
        )
    self.register_modules(
        segmentation_model=segmentation_model,
        segmentation_processor=segmentation_processor,
        vae=vae,
        text_encoder=text_encoder,
        tokenizer=tokenizer,
        unet=unet,
        scheduler=scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
    )
def _lowercase ( self , __SCREAMING_SNAKE_CASE = "auto" ):
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase : int = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
self.enable_attention_slicing(__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
UpperCamelCase : Optional[Any] = torch.device('''cuda''' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowercase ( self ):
"""simple docstring"""
if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(__SCREAMING_SNAKE_CASE , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 512 , __SCREAMING_SNAKE_CASE = 512 , __SCREAMING_SNAKE_CASE = 50 , __SCREAMING_SNAKE_CASE = 7.5 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "pil" , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 1 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = self.segmentation_processor(
text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
UpperCamelCase : List[str] = self.segmentation_model(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
UpperCamelCase : Optional[int] = self.numpy_to_pil(__SCREAMING_SNAKE_CASE )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
UpperCamelCase : List[str] = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , mask_image=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , )
| 371
|
def solution(length: int = 5_0) -> int:
    """Count the ways a row of `length` units can hold blocks of length >= 3
    separated by at least one empty unit (Project Euler 114 style), including
    the empty arrangement.

    The mangled def named its parameter differently from the `length` the
    body reads (NameError); name restored from the body, and the def renamed
    to match the `solution()` call in the __main__ guard below.

    >>> solution(7)
    17
    """
    # ways_number[i] = count for a row of length i; rows shorter than 3 only
    # admit the empty arrangement.
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            # Block placed strictly before the row end: one separating unit
            # follows it and the remainder is filled recursively.
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            # Plus the block of this length flush against the row end.
            ways_number[row_length] += 1

    return ways_number[length]


# Backward-compatible alias for the previous (mangled) public name.
a = solution
if __name__ == "__main__":
    # NOTE(review): `solution` must exist at module level for this to run --
    # the def above is (mis)named `a` in this file; confirm before shipping.
    print(f'''{solution() = }''')
| 315
| 0
|
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate sum(poly[i] * x**i) term by term.

    The mangled def repeated one parameter name twice (a SyntaxError); names
    restored from the `evaluate_poly(poly, x)` call in the __main__ guard.

    >>> evaluate_poly((0.0, 0.0, 5.0, 9.3, 7.0), 10.0)
    79800.0
    """
    return sum(c * (x**i) for i, c in enumerate(poly))


a = evaluate_poly  # backward-compatible alias for the mangled name
def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial by Horner's method: one multiply-add per
    coefficient, highest degree first.

    The mangled def repeated one parameter name twice (a SyntaxError); names
    restored from the `horner(poly, x)` call in the __main__ guard.
    """
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


a = horner  # backward-compatible alias for the mangled name
if __name__ == "__main__":
    # Demo: both evaluators must agree on this quartic at x = 10.
    # The mangled original assigned to placeholder names and then read
    # undefined `poly`/`x`.
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
| 350
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class UpperCAmelCase_(Trainer):
    """Trainer for extractive question answering.

    Adds a `post_process_function` that turns raw start/end logits into text
    predictions before metrics are computed, plus the un-tokenized
    `eval_examples` needed to map predictions back to answer spans.

    NOTE(review): the mangled base class `_a` was undefined; `Trainer` is
    restored from the module import. Both methods below were mangled to the
    same name `_lowercase` (the second shadowed the first); names restored to
    the `Trainer.evaluate` / `Trainer.predict` API they override.
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        """Run evaluation, post-process predictions, and return prefixed metrics."""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        """Run prediction on a test set; returns a PredictionOutput with prefixed metrics."""
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 315
| 0
|
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
# Names restored: the function bodies below read `logger` and the signature
# default reads `DEFAULT_DEVICE`, but the mangled assignments bound
# placeholder names instead.
logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Generate model outputs for `examples`, write them to `out_file`, and
    return timing stats (n_obs, runtime, seconds_per_sample).

    NOTE(review): the mangled def repeated one parameter name for every slot
    (a SyntaxError); names restored from the body reads and the caller's
    keyword arguments (batch_size=, device=, task=, prefix=). `--fp16` is the
    argparse flag, hence the `fp16` parameter name.
    """
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}


a = generate_summaries_or_translations  # backward-compatible alias for the mangled name
def datetime_now() -> str:
    """Current local time formatted like '2021-12-31 23:59:59'.

    Name restored from the `const=datetime_now()` call site in the argument
    parser below (the def was mangled to `a`).
    """
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


a = datetime_now  # backward-compatible alias for the mangled name
def run_generate(verbose: bool = True):
    """Parse CLI args, run generation, optionally score against references.

    Returns the score dict (empty when no --reference_path is given) and dumps
    it to --score_path. The mangled original read `verbose`, `args`,
    `parsed_args` and `scores` without ever binding them; names restored from
    those reads and from the argparse flag names (`--fp16` -> `args.fp16`).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples")
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all.")
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info", nargs="?", type=str, const=datetime_now(), help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ), )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores


a = run_generate  # backward-compatible alias for the mangled name
if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    # NOTE(review): `run_generate` must exist at module level -- the def above
    # is (mis)named `a` in this file; confirm before shipping.
    run_generate(verbose=True)
| 351
|
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return the anagram signature of `word`: its characters sorted.

    Name restored from the in-file call sites `signature(...)`; the def was
    mangled to `a`.
    """
    return "".join(sorted(word))


a = signature  # backward-compatible alias for the mangled name
def anagram(my_word: str) -> list[str]:
    """Return every dictionary word that shares `my_word`'s signature.

    Name restored from the `anagram(word)` call in the __main__ guard; the
    def was mangled to `a`. Relies on the module-level `word_by_signature`
    mapping built below.
    """
    return word_by_signature[signature(my_word)]


a = anagram  # backward-compatible alias for the mangled name
# Names restored: the comprehension below reads `data`, the loop reads
# `word_list`, and the file dump reads `all_anagrams`, but the mangled
# assignments bound placeholder names instead.
data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
| 315
| 0
|
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    """Euclidean GCD; greatest_common_divisor(0, b) == b.

    The mangled def repeated one parameter name twice (a SyntaxError) while
    the body read `a`, `b` and called `greatest_common_divisor` -- all
    restored from the body and the in-class call site below.
    """
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    """Hill cipher over the 36-character alphabet A-Z0-9 with an integer key matrix.

    NOTE(review): the mangled class bound its alphabet/vectorized helpers to
    unusable placeholder names while every method reads `self.key_string`,
    `self.modulus`, `self.to_int`; and all methods were named `_lowercase`
    while the in-class call sites use the restored names below. The class
    name is restored from the `HillCipher(...)` call in `main()`.
    """

    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key):
        """encrypt_key: square numpy integer matrix, invertible modulo 36."""
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        """Map an alphabet character to its numeric code."""
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        """Map a numeric code back to its alphabet character."""
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        """Raise ValueError unless det(key) is coprime with 36 (key invertible)."""
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        # stdlib gcd; behaviorally identical to the module-level Euclid helper.
        from math import gcd

        if gcd(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        """Uppercase, drop out-of-alphabet chars, pad with the last char to a
        multiple of break_key."""
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        """Encrypt `text` block-by-block with the key matrix (mod 36)."""
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self):
        """Return the key matrix inverse modulo 36."""
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        # det * inv(key) is the (integer) adjugate; multiply by det^-1 mod 36.
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        """Decrypt `text` with the modular inverse of the key matrix."""
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch

        return decrypted


# Backward-compatible alias for the previous (mangled) class name.
UpperCAmelCase_ = HillCipher
def main() -> None:
    """Interactive driver: read an n x n key matrix, then encrypt or decrypt text.

    The mangled original read an undefined loop bound and called the
    (also-mangled) class by its original name `HillCipher`; local names
    restored to match those reads.
    """
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


a = main  # backward-compatible alias for the mangled name
if __name__ == "__main__":
    # Run the module doctests, then the interactive Hill-cipher demo.
    # NOTE(review): `main` must exist at module level -- the def above is
    # (mis)named `a` in this file; confirm before shipping.
    import doctest
    doctest.testmod()
    main()
| 352
|
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose rows of raw data into per-attribute columns, as floats.

    The mangled def's parameter name did not match the `source_data` the body
    reads (NameError); names restored from the body.

    >>> get_data([[1, 2], [3, 4]])
    [[1.0, 3.0], [2.0, 4.0]]
    """
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            # Grow the column list lazily as wider rows appear.
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


a = get_data  # backward-compatible alias for the mangled name
def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max normalise each attribute column.

    weight 0: lower is better (score = 1 - normalised value);
    weight 1: higher is better (score = normalised value).
    A constant column (max == min) scores 1 resp. 0 for every item.

    Raises:
        ValueError: for any weight other than 0 or 1.
    """
    # NOTE(review): the mangled def repeated one parameter name twice
    # (a SyntaxError); names restored from the body's zip() reads.
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)

        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)

    return score_lists


a = calculate_each_score  # backward-compatible alias for the mangled name
def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-attribute score columns into one combined score per row.

    The mangled def's parameter name did not match the `score_lists` the body
    reads (NameError); name restored from the body.
    """
    final_scores: list[float] = [0 for _ in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele

    return final_scores


a = generate_final_scores  # backward-compatible alias for the mangled name
def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Weighted-matrix ranking: append a combined min-max score to each row
    of `source_data` (mutated in place and returned).

    `weights` holds 0 (minimise) or 1 (maximise) per attribute.

    NOTE(review): the mangled def repeated one parameter name twice (a
    SyntaxError); names restored from the body. The three helpers it calls
    are the sibling functions above, themselves (mis)named `a` in this file.
    """
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)

    return source_data


a = procentual_proximity  # backward-compatible alias for the mangled name
| 315
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
# Standard transformers lazy-module bootstrap. Names restored: the
# `_LazyModule(...)` call at the bottom reads `_import_structure`, and the
# optional-dependency branches must extend that same dict, but the mangled
# code bound placeholder names and discarded the lazy module object.
_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]

if TYPE_CHECKING:
    from .configuration_speechta import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechTaConfig,
        SpeechTaHifiGanConfig,
    )
    from .feature_extraction_speechta import SpeechTaFeatureExtractor
    from .processing_speechta import SpeechTaProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speechta import SpeechTaTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speechta import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechTaForSpeechToSpeech,
            SpeechTaForSpeechToText,
            SpeechTaForTextToSpeech,
            SpeechTaHifiGan,
            SpeechTaModel,
            SpeechTaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 353
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
# Input/output locations for the flip-augmentation pass; `main()` below reads
# these names, but the mangled assignments bound placeholders instead.
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    """Flip every dataset image (and its YOLO boxes) and save each result
    under a random 32-char suffix, with a matching annotation .txt file.

    Local names restored: the mangled original unpacked into placeholder
    names and then read `paths`/`new_annos`/`annos_list`, and called the
    sibling helpers by their original (now-mangled) names.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(3_2)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cva.imwrite(f"/{file_root}.jpg", image, [cva.IMWRITE_JPEG_QUALITY, 8_5])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


a = main  # backward-compatible alias for the mangled name
def get_dataset(label_dir: str, img_dir: str):
    """Read YOLO-format labels from `label_dir` and pair them with the .jpg
    of the same stem in `img_dir`.

    Returns:
        (img_paths, labels): parallel lists; each label is a list of
        [class, x_center, y_center, width, height] boxes. Label files with no
        boxes are skipped.

    NOTE(review): the mangled def repeated one parameter name twice (a
    SyntaxError); names restored from the body's glob/join reads.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


a = get_dataset  # backward-compatible alias for the mangled name
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1):
    """Flip each image horizontally (flip_type=1) or vertically (flip_type=0)
    and mirror the matching YOLO box centers.

    Returns:
        (new_imgs_list, new_annos_lists, path_list): flipped images, their
        adjusted annotations, and the source paths, index-aligned.

    NOTE(review): the mangled def repeated one parameter name three times (a
    SyntaxError); names restored from the body's index reads.
    """
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cva.imread(path)
        if flip_type == 1:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                # Horizontal flip mirrors the x-center across the image middle.
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                # Vertical flip mirrors the y-center across the image middle.
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


a = update_image_and_anno  # backward-compatible alias for the mangled name
def random_chars(number_char: int = 3_2) -> str:
    """Random lowercase-alphanumeric string of length `number_char` (> 1).

    The mangled def's parameter name did not match the `number_char` the
    assert reads (NameError); name restored from the body.
    """
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


a = random_chars  # backward-compatible alias for the mangled name
if __name__ == "__main__":
    # NOTE(review): `main` must exist at module level -- the def above is
    # (mis)named `a` in this file; confirm before shipping.
    main()
    print("DONE ✅")
| 315
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger and checkpoint -> config URL map. The mangled assignments
# bound placeholder names; `logger` and the archive-map name restored per the
# transformers module convention (nothing else in this chunk reads them --
# confirm against the rest of the file before shipping).
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}
class UpperCAmelCase_(PretrainedConfig):
    """Configuration class for the DeiT model (stores its hyper-parameters).

    NOTE(review): the mangled base class `_a` was undefined; `PretrainedConfig`
    is restored from the module import. The mangled __init__ repeated one
    parameter name 14 times (a SyntaxError) and dropped `self.` from every
    attribute assignment; parameter names restored from the surviving
    right-hand sides of those assignments.
    """

    # Required by the PretrainedConfig machinery to identify the model family.
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class UpperCAmelCase_(OnnxConfig):
    """ONNX export configuration for DeiT.

    NOTE(review): the mangled base class `_a` was undefined; `OnnxConfig` is
    restored from the module import. Both properties were mangled to
    `_lowercase` (the second shadowed the first); names restored to the
    `OnnxConfig.inputs` / `atol_for_validation` API they override. This class
    re-uses the mangled name of the config class above and therefore shadows
    it at module level (pre-existing in this file).
    """

    # Minimum torch version able to export this architecture.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        """Input axes for export: dynamic batch over NCHW pixel values."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        """Absolute tolerance used when validating the exported model."""
        return 1e-4
| 354
|
import qiskit
def a ( bita : int , bitb : int ):
    """Simulate a quantum half adder on the Aer simulator.

    Fixes: both parameters were declared with the same name
    (``SCREAMING_SNAKE_CASE_``), which is a SyntaxError, while the body read
    an unbound ``bita``; the two input bits now have distinct names, and the
    circuit/backend are passed to ``qiskit.execute`` in the documented order.

    Args:
        bita: first input bit (0 or 1), encoded on qubit 0.
        bitb: second input bit (0 or 1), encoded on qubit 1.

    Returns:
        Measurement histogram over 1000 shots; classical bit 0 holds the
        XOR (sum) and classical bit 1 the AND (carry).
    """
    simulator = qiskit.Aer.get_backend("aer_simulator")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bita == 1:
        qc_ha.x(0)
    if bitb == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit 2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit 3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # XOR value
    qc_ha.measure(3, 1)  # AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
if __name__ == "__main__":
    # The half-adder simulation above is defined as `a` in this file (the name
    # `half_adder` does not exist), and the f-string below interpolates
    # `counts`, so bind the result to that name.
    counts = a(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
# ---- non-code residue from source extraction (stray table fragment) ----
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase : Dict = logging.get_logger(__name__)
def a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
    """Build a DetaConfig (Swin-Large backbone) for the given checkpoint name.

    Fixes: every local was rebound to the single throwaway name
    ``UpperCamelCase``, so the Swin backbone config, label mapping and label
    count were all lost, and the raw model name was passed where the backbone
    config and boolean flags belong. Locals are restored and the config
    attributes are actually set.

    Args:
        SCREAMING_SNAKE_CASE_: checkpoint name; "o365" in the name selects
            the Objects365 label set, otherwise COCO detection labels.

    Returns:
        A fully populated ``DetaConfig``.
    """
    model_name = SCREAMING_SNAKE_CASE_
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )
    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        # NOTE(review): restored to True per the usual DETA conversion recipe —
        # the garbled code passed the model name here; confirm against upstream.
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )
    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def a ( SCREAMING_SNAKE_CASE_ : Optional[int] ):
    """Build the (old_key, new_key) pairs mapping original DETA/Swin checkpoint
    parameter names onto the HuggingFace model's parameter names.

    Fixes: the accumulator list was bound to a throwaway name
    (``UpperCamelCase``) while every ``append`` call read the unbound name
    ``rename_keys`` — a guaranteed NameError; the list is now bound to the
    name the function actually uses. All key strings are unchanged.

    Args:
        SCREAMING_SNAKE_CASE_: config exposing ``backbone_config.depths``,
            ``encoder_layers`` and ``decoder_layers``.

    Returns:
        List of 2-tuples ``(original_key, huggingface_key)``.
    """
    config = SCREAMING_SNAKE_CASE_
    rename_keys = []
    # stem
    # fmt: off
    rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
    rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
    rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
    rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
    # stages
    for i in range(len(config.backbone_config.depths ) ):
        for j in range(config.backbone_config.depths[i] ):
            rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
            rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
            rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
            rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
            rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
            rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
            rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
            rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
            rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
            rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
            rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
            rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
        if i < 3:
            rename_keys.append((F"backbone.0.body.layers.{i}.downsample.reduction.weight", F"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight") )
            rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.weight", F"model.backbone.model.encoder.layers.{i}.downsample.norm.weight") )
            rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.bias", F"model.backbone.model.encoder.layers.{i}.downsample.norm.bias") )
    rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
    rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
    rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
    rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
    rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
    rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
    # transformer encoder
    for i in range(config.encoder_layers ):
        rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", F"model.encoder.layers.{i}.self_attn.sampling_offsets.weight") )
        rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", F"model.encoder.layers.{i}.self_attn.sampling_offsets.bias") )
        rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", F"model.encoder.layers.{i}.self_attn.attention_weights.weight") )
        rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", F"model.encoder.layers.{i}.self_attn.attention_weights.bias") )
        rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.weight", F"model.encoder.layers.{i}.self_attn.value_proj.weight") )
        rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.bias", F"model.encoder.layers.{i}.self_attn.value_proj.bias") )
        rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.weight", F"model.encoder.layers.{i}.self_attn.output_proj.weight") )
        rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.bias", F"model.encoder.layers.{i}.self_attn.output_proj.bias") )
        rename_keys.append((F"transformer.encoder.layers.{i}.norm1.weight", F"model.encoder.layers.{i}.self_attn_layer_norm.weight") )
        rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"model.encoder.layers.{i}.self_attn_layer_norm.bias") )
        rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"model.encoder.layers.{i}.fc1.weight") )
        rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"model.encoder.layers.{i}.fc1.bias") )
        rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"model.encoder.layers.{i}.fc2.weight") )
        rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"model.encoder.layers.{i}.fc2.bias") )
        rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"model.encoder.layers.{i}.final_layer_norm.weight") )
        rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"model.encoder.layers.{i}.final_layer_norm.bias") )
    # transformer decoder
    for i in range(config.decoder_layers ):
        rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight") )
        rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias") )
        rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", F"model.decoder.layers.{i}.encoder_attn.attention_weights.weight") )
        rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", F"model.decoder.layers.{i}.encoder_attn.attention_weights.bias") )
        rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", F"model.decoder.layers.{i}.encoder_attn.value_proj.weight") )
        rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", F"model.decoder.layers.{i}.encoder_attn.value_proj.bias") )
        rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", F"model.decoder.layers.{i}.encoder_attn.output_proj.weight") )
        rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", F"model.decoder.layers.{i}.encoder_attn.output_proj.bias") )
        rename_keys.append((F"transformer.decoder.layers.{i}.norm1.weight", F"model.decoder.layers.{i}.encoder_attn_layer_norm.weight") )
        rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"model.decoder.layers.{i}.encoder_attn_layer_norm.bias") )
        rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"model.decoder.layers.{i}.self_attn.out_proj.weight") )
        rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"model.decoder.layers.{i}.self_attn.out_proj.bias") )
        rename_keys.append((F"transformer.decoder.layers.{i}.norm2.weight", F"model.decoder.layers.{i}.self_attn_layer_norm.weight") )
        rename_keys.append((F"transformer.decoder.layers.{i}.norm2.bias", F"model.decoder.layers.{i}.self_attn_layer_norm.bias") )
        rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"model.decoder.layers.{i}.fc1.weight") )
        rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"model.decoder.layers.{i}.fc1.bias") )
        rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"model.decoder.layers.{i}.fc2.weight") )
        rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"model.decoder.layers.{i}.fc2.bias") )
        rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"model.decoder.layers.{i}.final_layer_norm.weight") )
        rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"model.decoder.layers.{i}.final_layer_norm.bias") )
    # fmt: on
    return rename_keys
def a ( dct , old , new ):
    """Move the value stored under key ``old`` in ``dct`` to key ``new``.

    Fixes: the original declared all three parameters with the same name
    (a SyntaxError) and bound the popped value to a throwaway local without
    ever writing it back, so the dict only lost entries. The function now
    mutates ``dct`` in place as intended.

    Args:
        dct: mapping to mutate in place.
        old: existing key to remove (must be present — raises KeyError otherwise).
        new: key under which the value is re-inserted.
    """
    val = dct.pop(old)
    dct[new] = val
def a ( state_dict , backbone_config ):
    """Split each Swin block's fused qkv projection in ``state_dict`` into the
    separate query/key/value tensors the HF Swin implementation expects.

    Fixes: the original declared both parameters with the same name (a
    SyntaxError) and rebound every slice to the throwaway name
    ``UpperCamelCase`` instead of writing it back into the state dict; the
    per-stage width ``dim`` was also lost. Target key names restored per the
    HF conversion convention — confirm against the upstream script.

    Args:
        state_dict: checkpoint state dict, mutated in place.
        backbone_config: Swin config providing ``embed_dim`` and ``depths``.
    """
    # channel width doubles at every Swin stage
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def a ( state_dict , config ):
    """Split each decoder layer's fused self-attention in_proj in
    ``state_dict`` into separate q/k/v projection tensors.

    Fixes: the original declared both parameters with the same name (a
    SyntaxError) and rebound every slice to the throwaway name
    ``UpperCamelCase`` instead of writing it back; the ``hidden_size`` local
    was lost the same way. Target key names restored per the HF conversion
    convention — confirm against the upstream script.

    Args:
        state_dict: checkpoint state dict, mutated in place.
        config: model config providing ``d_model`` and ``decoder_layers``.
    """
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def a ( ):
    """Download the standard COCO cats test image used to sanity-check
    conversions.

    Fixes: the URL was bound to a throwaway name while ``requests.get``
    received the undefined ``SCREAMING_SNAKE_CASE_`` (for both the URL and
    ``stream=``); the request now streams the bound URL.

    Returns:
        A PIL image fetched over HTTP.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def a ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any ):
    """Convert an original DETA (Swin backbone) checkpoint into a HuggingFace
    DetaForObjectDetection model, verify its outputs on a test image, and
    optionally save it to disk and/or push it to the hub.

    NOTE(review): this block is badly garbled by mechanical renaming and does
    not run as written — the three parameters share one name (a SyntaxError),
    nearly every local is rebound to `UpperCamelCase`, and the names
    `get_deta_config`, `model_name`, `state_dict`, `create_rename_keys`,
    `rename_keys`, `rename_key`, `read_in_swin_q_k_v`,
    `read_in_decoder_q_k_v`, `val`, `model`, `prepare_img`, `processor`,
    `encoding`, `pixel_values`, `outputs`, `expected_logits`,
    `expected_boxes`, `pytorch_dump_folder_path` and `push_to_hub` are read
    without ever being bound here. Restore from the upstream conversion
    script before use.
    """
    UpperCamelCase : List[str] = get_deta_config(SCREAMING_SNAKE_CASE_ )  # NOTE(review): `get_deta_config` is not defined in this file
    # load original state dict
    if model_name == "deta-swin-large":
        UpperCamelCase : Optional[int] = hf_hub_download(repo_id='''nielsr/deta-checkpoints''' , filename='''adet_swin_ft.pth''' )
    elif model_name == "deta-swin-large-o365":
        UpperCamelCase : Union[str, Any] = hf_hub_download(repo_id='''jozhang97/deta-swin-l-o365''' , filename='''deta_swin_pt_o365.pth''' )
    else:
        raise ValueError(F"""Model name {model_name} not supported""" )
    UpperCamelCase : List[str] = torch.load(SCREAMING_SNAKE_CASE_ , map_location='''cpu''' )['''model''']
    # original state dict
    for name, param in state_dict.items():
        print(SCREAMING_SNAKE_CASE_ , param.shape )
    # rename keys
    UpperCamelCase : Dict = create_rename_keys(SCREAMING_SNAKE_CASE_ )
    for src, dest in rename_keys:
        rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    read_in_swin_q_k_v(SCREAMING_SNAKE_CASE_ , config.backbone_config )
    read_in_decoder_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            UpperCamelCase : Any = state_dict.pop(SCREAMING_SNAKE_CASE_ )
            UpperCamelCase : Any = val
        if "input_proj" in key:
            UpperCamelCase : Tuple = state_dict.pop(SCREAMING_SNAKE_CASE_ )
            UpperCamelCase : Optional[Any] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            UpperCamelCase : str = state_dict.pop(SCREAMING_SNAKE_CASE_ )
            UpperCamelCase : int = val
    # finally, create HuggingFace model and load state dict
    UpperCamelCase : List[Any] = DetaForObjectDetection(SCREAMING_SNAKE_CASE_ )
    model.load_state_dict(SCREAMING_SNAKE_CASE_ )
    model.eval()
    UpperCamelCase : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    model.to(SCREAMING_SNAKE_CASE_ )
    # load image processor
    UpperCamelCase : Optional[int] = DetaImageProcessor(format='''coco_detection''' )
    # verify our conversion on image
    UpperCamelCase : Tuple = prepare_img()
    UpperCamelCase : Union[str, Any] = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' )
    UpperCamelCase : List[Any] = encoding['''pixel_values''']
    UpperCamelCase : Optional[int] = model(pixel_values.to(SCREAMING_SNAKE_CASE_ ) )
    # verify logits
    print('''Logits:''' , outputs.logits[0, :3, :3] )
    print('''Boxes:''' , outputs.pred_boxes[0, :3, :3] )
    if model_name == "deta-swin-large":
        UpperCamelCase : Optional[int] = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
        UpperCamelCase : Tuple = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
    elif model_name == "deta-swin-large-o365":
        UpperCamelCase : Optional[Any] = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
        UpperCamelCase : List[Any] = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(SCREAMING_SNAKE_CASE_ ) , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(SCREAMING_SNAKE_CASE_ ) , atol=1E-4 )
    print('''Everything ok!''' )
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(F"""Saving PyTorch model and processor to {pytorch_dump_folder_path}...""" )
        Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
        model.save_pretrained(SCREAMING_SNAKE_CASE_ )
        processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
    # Push to hub
    if push_to_hub:
        print('''Pushing model and processor to hub...''' )
        model.push_to_hub(F"""jozhang97/{model_name}""" )
        processor.push_to_hub(F"""jozhang97/{model_name}""" )
if __name__ == "__main__":
    # Fixes: the parser/args were bound to throwaway names while `parser` and
    # `args` were read unbound, and the call targeted the undefined name
    # `convert_deta_checkpoint` (the conversion entry point above is `a`).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        type=str,
        default="deta-swin-large",
        choices=["deta-swin-large", "deta-swin-large-o365"],
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        help="Path to the folder to output PyTorch model.",
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    a(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
# ---- non-code residue from source extraction (stray table fragment) ----
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__UpperCAmelCase : str = logging.get_logger(__name__)
def a ( image_embeds , text_embeds ):
    """Pairwise cosine similarity between two batches of embeddings.

    Fixes: both parameters were declared with the same name (a SyntaxError)
    and the second normalized tensor was bound to a throwaway name while the
    matmul read the unbound `normalized_text_embeds`.

    Args:
        image_embeds: 2-D tensor of row embeddings.
        text_embeds: 2-D tensor of row embeddings.

    Returns:
        Tensor of shape (len(image_embeds), len(text_embeds)) with the cosine
        similarity of every pair (rows are L2-normalized before the matmul).
    """
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
class UpperCAmelCase_ ( _a):
    """CLIP-based safety checker: projects CLIP image features and scores them
    against fixed "concept" and "special care" embedding banks to flag NSFW
    content.

    NOTE(review): garbled by mechanical renaming — inside the methods the
    names `config`, `image_embeds`, `special_cos_dist`, `cos_dist`, `result`,
    `result_img`, `adjustment`, `concept_cos`, `concept_threshold`,
    `special_scores`, `special_care`, `special_adjustment` and
    `concept_scores` are read without ever being bound (each assignment
    rebinds `UpperCamelCase` instead). Restore from the upstream safety
    checker before use.
    """
    __UpperCamelCase : List[str] = CLIPConfig  # presumably the HF `config_class` — confirm
    __UpperCamelCase : Optional[int] = ["CLIPEncoderLayer"]  # presumably `_no_split_modules` — confirm; also shadows the attribute above
    def __init__( self , __SCREAMING_SNAKE_CASE ):
        """Build the CLIP vision tower, the visual projection, and the frozen
        concept / special-care embedding banks (17 and 3 rows respectively)."""
        super().__init__(__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Any = CLIPVisionModel(config.vision_config )  # NOTE(review): `config` is never bound (the parameter is __SCREAMING_SNAKE_CASE)
        UpperCamelCase : List[str] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Dict = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Optional[int] = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Optional[Any] = nn.Parameter(torch.ones(17 ) , requires_grad=__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Optional[Any] = nn.Parameter(torch.ones(3 ) , requires_grad=__SCREAMING_SNAKE_CASE )
    @torch.no_grad()
    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
        """Per-image (numpy) safety pass: score each image against the special
        and regular concept banks and collect per-image flag dicts."""
        UpperCamelCase : Tuple = self.vision_model(__SCREAMING_SNAKE_CASE )[1] # pooled_output
        UpperCamelCase : Union[str, Any] = self.visual_projection(__SCREAMING_SNAKE_CASE )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        UpperCamelCase : Optional[int] = cosine_distance(__SCREAMING_SNAKE_CASE , self.special_care_embeds ).cpu().float().numpy()  # NOTE(review): `cosine_distance` is defined in this file as `a`
        UpperCamelCase : List[Any] = cosine_distance(__SCREAMING_SNAKE_CASE , self.concept_embeds ).cpu().float().numpy()
        UpperCamelCase : Dict = []
        UpperCamelCase : List[str] = image_embeds.shape[0]
        for i in range(__SCREAMING_SNAKE_CASE ):
            UpperCamelCase : Optional[Any] = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []}
            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            UpperCamelCase : Optional[int] = 0.0
            for concept_idx in range(len(special_cos_dist[0] ) ):
                UpperCamelCase : List[str] = special_cos_dist[i][concept_idx]
                UpperCamelCase : Optional[Any] = self.special_care_embeds_weights[concept_idx].item()
                UpperCamelCase : Any = round(concept_cos - concept_threshold + adjustment , 3 )  # NOTE(review): score is never stored into result_img
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]} )
                    UpperCamelCase : Optional[int] = 0.01
            for concept_idx in range(len(cos_dist[0] ) ):
                UpperCamelCase : Optional[int] = cos_dist[i][concept_idx]
                UpperCamelCase : List[str] = self.concept_embeds_weights[concept_idx].item()
                UpperCamelCase : Any = round(concept_cos - concept_threshold + adjustment , 3 )
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(__SCREAMING_SNAKE_CASE )
            result.append(__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Union[str, Any] = [len(res['''bad_concepts'''] ) > 0 for res in result]
        return images, has_nsfw_concepts
    @torch.no_grad()
    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
        """Vectorized (tensor) safety pass: same scoring as above but computed
        batch-wide with torch ops instead of per-image python loops.

        NOTE(review): this second `_lowercase` definition overwrites the one
        above in the class namespace — both were presumably distinct method
        names before renaming."""
        UpperCamelCase : Any = self.vision_model(__SCREAMING_SNAKE_CASE )[1] # pooled_output
        UpperCamelCase : int = self.visual_projection(__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Tuple = cosine_distance(__SCREAMING_SNAKE_CASE , self.special_care_embeds )
        UpperCamelCase : str = cosine_distance(__SCREAMING_SNAKE_CASE , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        UpperCamelCase : Union[str, Any] = 0.0
        UpperCamelCase : Optional[Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        UpperCamelCase : Optional[Any] = torch.any(special_scores > 0 , dim=1 )
        UpperCamelCase : int = special_care * 0.01
        UpperCamelCase : Tuple = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
        UpperCamelCase : Optional[int] = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        UpperCamelCase : List[str] = torch.any(concept_scores > 0 , dim=1 )
        return images, has_nsfw_concepts
# ---- non-code residue from source extraction (stray table fragment) ----
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class UpperCAmelCase_ :
    """Helper that builds small random DPR configs/inputs and runs shape
    checks against the TF DPR encoder/reader models.

    NOTE(review): garbled by mechanical renaming — ``__init__`` declares every
    parameter with the same name ``__SCREAMING_SNAKE_CASE`` (a SyntaxError)
    and then reads ``parent``, ``batch_size``, ``seq_length``, ... which are
    never bound; each assignment rebinds the local ``UpperCamelCase`` instead
    of setting an instance attribute, and ``prepare_config_and_inputs``
    returns names it never defines. Restore from the upstream test file
    before use.
    """
    def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=0 , ):
        """Record the hyperparameters used to build toy configs/inputs."""
        UpperCamelCase : Any = parent  # NOTE(review): none of the right-hand names below are bound (see class note)
        UpperCamelCase : Tuple = batch_size
        UpperCamelCase : str = seq_length
        UpperCamelCase : Dict = is_training
        UpperCamelCase : Dict = use_input_mask
        UpperCamelCase : Optional[int] = use_token_type_ids
        UpperCamelCase : List[Any] = use_labels
        UpperCamelCase : List[Any] = vocab_size
        UpperCamelCase : Union[str, Any] = hidden_size
        UpperCamelCase : Optional[Any] = num_hidden_layers
        UpperCamelCase : Union[str, Any] = num_attention_heads
        UpperCamelCase : List[Any] = intermediate_size
        UpperCamelCase : List[Any] = hidden_act
        UpperCamelCase : Optional[Any] = hidden_dropout_prob
        UpperCamelCase : int = attention_probs_dropout_prob
        UpperCamelCase : Tuple = max_position_embeddings
        UpperCamelCase : List[str] = type_vocab_size
        UpperCamelCase : List[Any] = type_sequence_label_size
        UpperCamelCase : List[Any] = initializer_range
        UpperCamelCase : Optional[int] = num_labels
        UpperCamelCase : Optional[int] = num_choices
        UpperCamelCase : Optional[int] = scope
        UpperCamelCase : Union[str, Any] = projection_dim
    def _lowercase ( self ):
        """Build a random toy DPRConfig plus input tensors / labels."""
        UpperCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCamelCase : Union[str, Any] = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            UpperCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCamelCase : Any = None
        if self.use_token_type_ids:
            UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        UpperCamelCase : str = None
        UpperCamelCase : Union[str, Any] = None
        UpperCamelCase : Union[str, Any] = None
        if self.use_labels:
            UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
        UpperCamelCase : Optional[Any] = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
        UpperCamelCase : Any = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels  # NOTE(review): these names are never bound in this method
    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
        """Shape-check TFDPRContextEncoder's pooled output.

        NOTE(review): parameters again share one name (SyntaxError)."""
        UpperCamelCase : Optional[Any] = TFDPRContextEncoder(config=__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Optional[int] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
        UpperCamelCase : int = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
        UpperCamelCase : str = model(__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
        """Shape-check TFDPRQuestionEncoder's pooled output."""
        UpperCamelCase : str = TFDPRQuestionEncoder(config=__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Tuple = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Any = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Any = model(__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
        """Shape-check TFDPRReader's start/end/relevance logits."""
        UpperCamelCase : List[Any] = TFDPRReader(config=__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Optional[Any] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
    def _lowercase ( self ):
        """Return (config, inputs_dict) for the common model tests."""
        UpperCamelCase : List[Any] = self.prepare_config_and_inputs()  # NOTE(review): no method of that name exists here (all are `_lowercase`)
        (
            UpperCamelCase
        ) : List[Any] = config_and_inputs
        UpperCamelCase : Union[str, Any] = {'''input_ids''': input_ids}
        return config, inputs_dict  # NOTE(review): `config`/`inputs_dict` are never bound
@require_tf
class UpperCAmelCase_ ( _a, _a, unittest.TestCase):
    """Common-suite tests for the three TF DPR models.

    NOTE(review): garbled — `setUp` binds the tester/config-tester to the
    throwaway local `UpperCamelCase` instead of `self.model_tester` /
    `self.config_tester`, and it references `TFDPRModelTester`, which does
    not exist under that name in this file.  The per-model tests then call
    `self.model_tester.create_and_check_dpr_*`, methods that the (renamed)
    tester class no longer exposes.  Restore from the upstream test file
    before use.
    """
    __UpperCamelCase : List[str] = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    # presumably `pipeline_model_mapping` and the usual common-test feature
    # flags; attribute names were destroyed by renaming — confirm upstream
    __UpperCamelCase : Optional[int] = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
    __UpperCamelCase : int = False
    __UpperCamelCase : Optional[int] = False
    __UpperCamelCase : Any = False
    __UpperCamelCase : Tuple = False
    __UpperCamelCase : Dict = False
    def _lowercase ( self ):
        """Build the model tester and config tester used by the suite."""
        UpperCamelCase : Union[str, Any] = TFDPRModelTester(self )
        UpperCamelCase : List[str] = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 )  # NOTE(review): __SCREAMING_SNAKE_CASE is unbound here
    def _lowercase ( self ):
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()
    def _lowercase ( self ):
        """Exercise the context-encoder shape checks."""
        UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*__SCREAMING_SNAKE_CASE )
    def _lowercase ( self ):
        """Exercise the question-encoder shape checks."""
        UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*__SCREAMING_SNAKE_CASE )
    def _lowercase ( self ):
        """Exercise the reader shape checks."""
        UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*__SCREAMING_SNAKE_CASE )
    @slow
    def _lowercase ( self ):
        """Smoke-test from_pretrained for the first archive entry of each model type."""
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase : List[str] = TFDPRContextEncoder.from_pretrained(__SCREAMING_SNAKE_CASE )
            self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase : Optional[int] = TFDPRContextEncoder.from_pretrained(__SCREAMING_SNAKE_CASE )
            self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase : Union[str, Any] = TFDPRQuestionEncoder.from_pretrained(__SCREAMING_SNAKE_CASE )
            self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase : List[str] = TFDPRReader.from_pretrained(__SCREAMING_SNAKE_CASE )
            self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
    """Integration test: compare a TFDPRQuestionEncoder embedding slice against
    precomputed reference values for a fixed tokenized input."""

    @slow
    def _lowercase ( self ):
        """Encode one sentence and check the first 10 embedding dimensions."""
        # NOTE(review): every intermediate below is bound to a discarded local,
        # leaving `model`, `output` and `expected_slice` unbound at their use
        # sites — upstream presumably used those names directly; confirm.
        UpperCamelCase : Optional[Any] = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' )
        UpperCamelCase : Optional[int] = tf.constant(
            [[101, 7_592, 1_010, 2_003, 2_026, 3_899, 10_140, 1_029, 102]] )  # [CLS] hello, is my dog cute? [SEP]
        UpperCamelCase : int = model(__SCREAMING_SNAKE_CASE )[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        UpperCamelCase : Optional[Any] = tf.constant(
            [
                [
                    0.03_236_253,
                    0.12_753_335,
                    0.16_818_509,
                    0.00_279_786,
                    0.3_896_933,
                    0.24_264_945,
                    0.2_178_971,
                    -0.02_335_227,
                    -0.08_481_959,
                    -0.14_324_117,
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 356
|
from argparse import ArgumentParser
from .env import EnvironmentCommand
def a ():
    """Entry point for the ``diffusers-cli`` command-line tool.

    Builds the argument parser, registers the available subcommands, parses
    ``sys.argv`` and dispatches to the selected command's ``run`` method.
    Prints help and exits with status 1 when no subcommand is selected.
    """
    # Bug fix: the intermediate results were previously bound to throwaway
    # names, so `parser`, `args` and `service` were unbound (NameError) at
    # their use sites; bind them to the names the body actually uses.
    parser = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
    commands_parser = parser.add_subparsers(help='''diffusers-cli command helpers''' )

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )

    # Let's go
    args = parser.parse_args()

    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )

    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
    # Bug fix: this module's entry-point function is named `a`, but the guard
    # previously called an undefined `main()`, raising NameError.
    a()
| 315
| 0
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
# Module-level logger for this tokenizer module.
__UpperCAmelCase : List[str] = logging.get_logger(__name__)

# Canonical file names used when saving/loading the tokenizer.
# NOTE(review): all constants below share obfuscated names — upstream these
# were the distinct VOCAB_FILES_NAMES / *_PRETRAINED_* tables; confirm.
__UpperCAmelCase : int = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Hub URLs for the context-encoder checkpoints' vocab/tokenizer files.
__UpperCAmelCase : Optional[Any] = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
# Hub URLs for the question-encoder checkpoints' vocab/tokenizer files.
__UpperCAmelCase : Union[str, Any] = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
# Hub URLs for the reader checkpoints' vocab/tokenizer files.
__UpperCAmelCase : List[str] = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
# Maximum positional-embedding sizes (i.e. model max input length) per checkpoint.
__UpperCAmelCase : Dict = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
__UpperCAmelCase : int = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
__UpperCAmelCase : List[str] = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}
# Per-checkpoint tokenizer init kwargs (all DPR checkpoints lower-case input).
__UpperCAmelCase : int = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
__UpperCAmelCase : List[Any] = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
__UpperCAmelCase : Union[str, Any] = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class UpperCAmelCase_ ( _a):
    """Fast (Rust-backed) tokenizer for the DPR context encoder.

    Tokenization itself is plain BERT fast tokenization; only the
    pretrained-resource tables differ.  NOTE(review): the base class `_a` is
    not defined in this module — upstream this derives from
    BertTokenizerFast (imported above); confirm.
    """

    __UpperCamelCase : Optional[int] = VOCAB_FILES_NAMES
    __UpperCamelCase : Optional[int] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    __UpperCamelCase : int = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __UpperCamelCase : Union[str, Any] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    __UpperCamelCase : Any = DPRContextEncoderTokenizer
class UpperCAmelCase_ ( _a):
    """Fast (Rust-backed) tokenizer for the DPR question encoder.

    NOTE(review): the base class `_a` is not defined in this module — upstream
    this derives from BertTokenizerFast (imported above); confirm.
    """

    __UpperCamelCase : List[str] = VOCAB_FILES_NAMES
    __UpperCamelCase : str = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    __UpperCamelCase : Dict = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __UpperCamelCase : Dict = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    __UpperCamelCase : Tuple = DPRQuestionEncoderTokenizer
# Result record for a single predicted answer span from the DPR reader.
__UpperCAmelCase : Union[str, Any] = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
# Raw reader model output: per-token start/end logits plus passage relevance.
__UpperCAmelCase : Any = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
__UpperCAmelCase : Tuple = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. 
This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. 
If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(_a)
class UpperCAmelCase_ :
    """Mixin adding DPR-reader-specific encoding and span-decoding helpers.

    NOTE(review): as written this block cannot execute — the methods repeat
    the parameter name `__SCREAMING_SNAKE_CASE` (a SyntaxError), the two
    helper methods share the name `_lowercase` (the second shadows the
    first), the decorator argument `_a` is undefined, and many locals are
    bound to throwaway names while use sites keep the original names.
    Upstream this is transformers' CustomDPRReaderTokenizerMixin; the
    documentation below describes the apparent intent — confirm.
    """

    def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
        """Encode question/title/text triples into reader input ids (+ attention mask)."""
        # No titles/texts supplied: behave exactly like the parent tokenizer.
        if titles is None and texts is None:
            return super().__call__(
                __SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
        # Only one of titles/texts supplied: encode it as the text pair.
        elif titles is None or texts is None:
            UpperCamelCase : Dict = titles if texts is None else texts
            return super().__call__(
                __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
        # Normalize scalars to lists; one question may be shared across passages.
        UpperCamelCase : Tuple = titles if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else [titles]
        UpperCamelCase : List[str] = texts if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else [texts]
        UpperCamelCase : int = len(__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Tuple = questions if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else [questions] * n_passages
        assert len(__SCREAMING_SNAKE_CASE ) == len(
            __SCREAMING_SNAKE_CASE ), f"""There should be as many titles than texts but got {len(__SCREAMING_SNAKE_CASE )} titles and {len(__SCREAMING_SNAKE_CASE )} texts."""
        # Encode "[CLS] question [SEP] title [SEP]" and the passage text separately,
        # then concatenate (truncating to max_length when requested).
        UpperCamelCase : Optional[int] = super().__call__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE )['''input_ids''']
        UpperCamelCase : Tuple = super().__call__(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE )['''input_ids''']
        UpperCamelCase : int = {
            '''input_ids''': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            ]
        }
        # Attention mask: 1 for real tokens, 0 for padding.
        if return_attention_mask is not False:
            UpperCamelCase : List[Any] = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            UpperCamelCase : int = attention_mask
        return self.pad(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE )

    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 16 , __SCREAMING_SNAKE_CASE = 64 , __SCREAMING_SNAKE_CASE = 4 , ):
        """Turn raw reader logits into the best-scored answer spans per passage."""
        UpperCamelCase : Any = reader_input['''input_ids''']
        # Unpack start/end/relevance logits from the model output.
        UpperCamelCase : List[Any] = reader_output[:3]
        UpperCamelCase : Tuple = len(__SCREAMING_SNAKE_CASE )
        # Rank passages by relevance, most relevant first.
        UpperCamelCase : List[Any] = sorted(range(__SCREAMING_SNAKE_CASE ) , reverse=__SCREAMING_SNAKE_CASE , key=relevance_logits.__getitem__ )
        UpperCamelCase : List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            UpperCamelCase : Any = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            UpperCamelCase : Dict = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                UpperCamelCase : Optional[Any] = sequence_ids.index(self.pad_token_id )
            else:
                UpperCamelCase : Tuple = len(__SCREAMING_SNAKE_CASE )
            UpperCamelCase : Union[str, Any] = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__SCREAMING_SNAKE_CASE , top_spans=__SCREAMING_SNAKE_CASE , )
            for start_index, end_index in best_spans:
                # Map span indices back to positions in the full sequence.
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__SCREAMING_SNAKE_CASE , start_index=__SCREAMING_SNAKE_CASE , end_index=__SCREAMING_SNAKE_CASE , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
            if len(__SCREAMING_SNAKE_CASE ) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ):
        """Score every candidate (start, end) pair and keep the best non-overlapping spans."""
        UpperCamelCase : Dict = []
        for start_index, start_score in enumerate(__SCREAMING_SNAKE_CASE ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        # Highest combined score first.
        UpperCamelCase : int = sorted(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : x[1] , reverse=__SCREAMING_SNAKE_CASE )
        UpperCamelCase : List[str] = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]"""
            UpperCamelCase : Optional[Any] = end_index - start_index + 1
            assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}"""
            # Skip candidates overlapping an already-chosen span.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(__SCREAMING_SNAKE_CASE ) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(_a)
class UpperCAmelCase_ ( _a, _a):
    """Fast DPR reader tokenizer: BERT fast tokenization plus reader-specific
    span decoding inherited from the mixin defined above.

    NOTE(review): the decorator argument and both base classes are the
    undefined name `_a` — upstream these are the reader docstring constant,
    the reader mixin and BertTokenizerFast; confirm.
    """

    __UpperCamelCase : Any = VOCAB_FILES_NAMES
    __UpperCamelCase : List[str] = READER_PRETRAINED_VOCAB_FILES_MAP
    __UpperCamelCase : Any = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __UpperCamelCase : Tuple = READER_PRETRAINED_INIT_CONFIGURATION
    __UpperCamelCase : Optional[int] = ["input_ids", "attention_mask"]
    __UpperCamelCase : str = DPRReaderTokenizer
| 357
|
def a ( SCREAMING_SNAKE_CASE_ : str ) -> str:
    """Return the input string with every ASCII lowercase letter upper-cased.

    Characters outside 'a'..'z' (digits, punctuation, non-ASCII) are passed
    through unchanged, unlike ``str.upper()`` which also affects non-ASCII
    letters.
    """
    # Bug fix: the original called ord() on the whole argument (TypeError) and
    # iterated an undefined name `word`; work per character, shifting by the
    # ASCII distance (32) between lowercase and uppercase letters.
    return "".join(chr(ord(char ) - 3_2 ) if '''a''' <= char <= '''z''' else char for char in SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    from doctest import testmod

    testmod()
| 315
| 0
|
"""simple docstring"""
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__UpperCAmelCase : str = logging.get_logger(__name__)
def a ( image_embeds , text_embeds ):
    """Return the pairwise cosine-similarity matrix between two embedding batches.

    Each row of both inputs is L2-normalized, so multiplying the first by the
    transpose of the second yields cosine similarities of shape
    ``(len(image_embeds), len(text_embeds))``.

    Bug fix: the original declared the parameter name ``SCREAMING_SNAKE_CASE_``
    twice (a SyntaxError) and bound both normalized tensors to throwaway
    locals while the return statement referenced ``normalized_text_embeds``.
    NOTE(review): callers elsewhere in this module invoke this helper as
    ``cosine_distance`` — confirm the intended module-level name upstream.
    """
    normalized_image_embeds = nn.functional.normalize(image_embeds )
    normalized_text_embeds = nn.functional.normalize(text_embeds )
    return torch.mm(normalized_image_embeds , normalized_text_embeds.t() )
class UpperCAmelCase_ ( _a):
    """CLIP-based NSFW safety checker (as used by Stable Diffusion pipelines).

    Projects images into CLIP space and compares them against learned
    "concept" and "special care" embeddings via cosine similarity.

    NOTE(review): the base class `_a` is undefined here (upstream:
    PreTrainedModel), and throughout the methods intermediates are bound to
    throwaway locals while use sites reference the original names
    (`result_img`, `special_cos_dist`, `images`, ...), so this block cannot
    run as written — confirm against upstream diffusers.
    """

    __UpperCamelCase : List[str] = CLIPConfig
    __UpperCamelCase : Optional[int] = ["CLIPEncoderLayer"]

    def __init__( self , __SCREAMING_SNAKE_CASE ):
        """Build the CLIP vision tower, the projection head, and the learned
        concept/special-care embedding tables with their score thresholds."""
        super().__init__(__SCREAMING_SNAKE_CASE )

        UpperCamelCase : Any = CLIPVisionModel(config.vision_config )
        UpperCamelCase : List[str] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=__SCREAMING_SNAKE_CASE )

        # 17 NSFW concepts and 3 special-care concepts, frozen (requires_grad flag
        # is passed via an unbound name here — presumably False upstream; confirm).
        UpperCamelCase : Dict = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Optional[int] = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=__SCREAMING_SNAKE_CASE )

        UpperCamelCase : Optional[Any] = nn.Parameter(torch.ones(17 ) , requires_grad=__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Optional[Any] = nn.Parameter(torch.ones(3 ) , requires_grad=__SCREAMING_SNAKE_CASE )

    @torch.no_grad()
    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
        """Per-image loop variant: score each image against every concept and
        collect per-image score dicts plus a has-NSFW flag list."""
        UpperCamelCase : Tuple = self.vision_model(__SCREAMING_SNAKE_CASE )[1]  # pooled_output
        UpperCamelCase : Union[str, Any] = self.visual_projection(__SCREAMING_SNAKE_CASE )

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        UpperCamelCase : Optional[int] = cosine_distance(__SCREAMING_SNAKE_CASE , self.special_care_embeds ).cpu().float().numpy()
        UpperCamelCase : List[Any] = cosine_distance(__SCREAMING_SNAKE_CASE , self.concept_embeds ).cpu().float().numpy()

        UpperCamelCase : Dict = []
        UpperCamelCase : List[str] = image_embeds.shape[0]
        for i in range(__SCREAMING_SNAKE_CASE ):
            UpperCamelCase : Optional[Any] = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            UpperCamelCase : Optional[int] = 0.0

            for concept_idx in range(len(special_cos_dist[0] ) ):
                UpperCamelCase : List[str] = special_cos_dist[i][concept_idx]
                UpperCamelCase : Optional[Any] = self.special_care_embeds_weights[concept_idx].item()
                UpperCamelCase : Any = round(concept_cos - concept_threshold + adjustment , 3 )

                if result_img["special_scores"][concept_idx] > 0:
                    # NOTE(review): appends a *set* literal, not a dict — present
                    # in upstream too, but looks unintentional; confirm.
                    result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]} )
                    # A triggered special-care concept lowers every threshold.
                    UpperCamelCase : Optional[int] = 0.01

            for concept_idx in range(len(cos_dist[0] ) ):
                UpperCamelCase : Optional[int] = cos_dist[i][concept_idx]
                UpperCamelCase : List[str] = self.concept_embeds_weights[concept_idx].item()
                UpperCamelCase : Any = round(concept_cos - concept_threshold + adjustment , 3 )

                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(__SCREAMING_SNAKE_CASE )

            result.append(__SCREAMING_SNAKE_CASE )

        UpperCamelCase : Union[str, Any] = [len(res['''bad_concepts'''] ) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
        """Vectorized variant (ONNX-friendly): same scoring done with batched
        tensor ops instead of Python loops."""
        UpperCamelCase : Any = self.vision_model(__SCREAMING_SNAKE_CASE )[1]  # pooled_output
        UpperCamelCase : int = self.visual_projection(__SCREAMING_SNAKE_CASE )

        UpperCamelCase : Tuple = cosine_distance(__SCREAMING_SNAKE_CASE , self.special_care_embeds )
        UpperCamelCase : str = cosine_distance(__SCREAMING_SNAKE_CASE , self.concept_embeds )

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        UpperCamelCase : Union[str, Any] = 0.0

        UpperCamelCase : Optional[Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        UpperCamelCase : Optional[Any] = torch.any(special_scores > 0 , dim=1 )
        # Any triggered special-care concept lowers every concept threshold by 0.01.
        UpperCamelCase : int = special_care * 0.01
        UpperCamelCase : Tuple = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )

        UpperCamelCase : Optional[int] = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        UpperCamelCase : List[str] = torch.any(concept_scores > 0 , dim=1 )

        return images, has_nsfw_concepts
| 358
|
import math
def a ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
UpperCamelCase : Union[str, Any] = range(3 , int(math.sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def a ( value : int , factor : int = 1 , **kwargs ) -> int:
    """Find a prime by stepping from ``factor * value``.

    Steps upward one at a time (or downward when ``desc=True`` is passed in
    ``kwargs``) until a prime is reached.  If the starting point is itself
    prime, the search restarts from one past it (so a prime seed never
    returns itself).

    Bug fix: the original declared all three parameters with the same name
    (a SyntaxError) and relied on the unbound names ``first_value_val``,
    ``is_prime`` and ``next_prime``; the primality check is now a private
    inline helper so the function is self-contained.
    """

    def _is_prime(candidate ):
        # Trial division by odd numbers up to sqrt(candidate).
        if 1 < candidate < 4:
            return True
        if candidate < 2 or not candidate % 2:
            return False
        return not any(not candidate % i for i in range(3 , int(math.sqrt(candidate ) + 1 ) , 2 ))

    value = factor * value
    first_value_val = value

    while not _is_prime(value ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        # The seed was already prime: keep searching past it.
        return a(value + 1 , **kwargs )
    return value
| 315
| 0
|
import math
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
UpperCamelCase : Union[str, Any] = range(3 , int(math.sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple=1 , **SCREAMING_SNAKE_CASE_ : Tuple ):
"""simple docstring"""
UpperCamelCase : Tuple = factor * value
UpperCamelCase : Optional[int] = value
while not is_prime(SCREAMING_SNAKE_CASE_ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **SCREAMING_SNAKE_CASE_ )
return value
| 359
|
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a):
    """Deprecated feature-extractor shim kept for backward compatibility.

    Emits a deprecation warning and otherwise delegates everything to the
    parent.  NOTE(review): the base class `_a` is undefined in this module —
    upstream this derives from ImageGPTImageProcessor (imported above); the
    second positional argument to warnings.warn below is the *args tuple,
    where upstream passes FutureWarning as the category; confirm.
    """

    def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
        """Warn about the rename, then forward all arguments to the parent."""
        warnings.warn(
            '''The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use ImageGPTImageProcessor instead.''' , __SCREAMING_SNAKE_CASE , )
        super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
| 315
| 0
|
from __future__ import annotations
from math import ceil, floor, sqrt
def a ( SCREAMING_SNAKE_CASE_ : int = 2_0_0_0_0_0_0 ) -> int:
    """Project Euler 85: area of the grid whose rectangle count is closest to target.

    A p x q grid contains T(p) * T(q) axis-aligned rectangles, where
    T(n) = n(n+1)/2 is the n-th triangle number.  For each candidate first
    dimension we estimate the second via the triangle-number quadratic
    formula and test the two nearest integers.

    Bug fix: every intermediate in the original was bound to a throwaway
    name, leaving `triangle_numbers`, `best_product`, `area`, ... unbound at
    their use sites (NameError); the bindings below restore the names the
    body uses.
    """
    target = SCREAMING_SNAKE_CASE_
    triangle_numbers: list[int] = [0]
    # Precompute triangle numbers comfortably past the largest dimension needed.
    for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )

    # we want this to be as close as possible to target
    best_product = 0
    # the area corresponding to the grid that gives the product closest to target
    area = 0

    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        # Solve T(b) ~= target / T(a) for b with the quadratic formula, then
        # try the integers on either side of the estimate.
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        b_floor = floor(b_estimate )
        b_ceil = ceil(b_estimate )
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
    # Bug fix: the solver in this module is named `a`; the previous call to an
    # undefined `solution()` raised NameError (output label updated to match).
    print(f'''{a() = }''')
| 360
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase_ ( unittest.TestCase):
    """Holds the hyper-parameters for the LayoutLMv3 image-processor tests and
    builds the kwargs dict used to instantiate the processor under test."""

    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , apply_ocr=True , ):
        """Store the test configuration on the instance.

        Bug fix: the original declared every parameter with the same name
        (a SyntaxError) and bound the values to discarded locals instead of
        instance attributes, so the dict builder below could never read them.
        Parameter names/defaults are restored from the assignment order.
        """
        # Default target size mirrors the processor's own default.
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def _lowercase ( self ):
        """Return the kwargs dict for constructing the image processor."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCAmelCase_ ( _a, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def _lowercase ( self ):
        """Build the shared image-processing tester for this test case."""
        # NOTE(review): the tester is bound to a discarded local rather than
        # self.image_processor_tester (which sibling methods read), and
        # `LayoutLMvaImageProcessingTester` is not defined under that name in
        # this file as written — confirm against upstream.
        UpperCamelCase : List[Any] = LayoutLMvaImageProcessingTester(self )
    @property
    def _lowercase ( self ):
        """Kwargs dict used to build the image processor under test."""
        # NOTE(review): relies on self.image_processor_tester, which the setup
        # method in this class never assigns as written — confirm upstream.
        return self.image_processor_tester.prepare_image_processor_dict()
    def _lowercase ( self ):
        """The processor exposes the expected configurable attributes."""
        # NOTE(review): the processor is bound to a discarded local while the
        # hasattr checks reference the unbound `__SCREAMING_SNAKE_CASE` —
        # presumably the freshly constructed processor was meant; confirm.
        UpperCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''' ) )
        self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''' ) )
        self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''apply_ocr''' ) )
    def _lowercase ( self ):
        """`from_dict` honours both the default size and a keyword override."""
        # NOTE(review): results are bound to discarded locals while the
        # assertions reference `image_processor` — confirm upstream naming.
        UpperCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )

        UpperCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
    def _lowercase ( self ):
        """No-op placeholder (intentionally empty in the original)."""
        # NOTE(review): every test method in this class shares the name
        # `_lowercase`, so earlier definitions are shadowed — upstream these
        # carried distinct test_* names; confirm.
        pass
    def _lowercase ( self ):
        """Run random PIL images through the processor; check output shapes and
        that OCR words/boxes are returned for the unbatched call."""
        # NOTE(review): intermediates are bound to discarded locals while the
        # assertions reference `image_inputs`, `encoding` and `encoded_images`
        # — confirm upstream naming.
        UpperCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCamelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )

        # Test not batched input
        UpperCamelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )

        self.assertIsInstance(encoding.words , __SCREAMING_SNAKE_CASE )
        self.assertIsInstance(encoding.boxes , __SCREAMING_SNAKE_CASE )

        # Test batched
        UpperCamelCase : int = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def _lowercase ( self ):
        """Run random numpy arrays through the processor; check output shapes
        for both the unbatched and batched call."""
        # NOTE(review): same discarded-binding issue as the PIL variant above.
        UpperCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCamelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )

        # Test not batched input
        UpperCamelCase : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )

        # Test batched
        UpperCamelCase : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def _lowercase ( self ):
        """Run random PyTorch tensors through the processor; check output shapes
        for both the unbatched and batched call."""
        # NOTE(review): same discarded-binding issue as the PIL variant above.
        UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCamelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )

        # Test not batched input
        UpperCamelCase : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )

        # Test batched
        UpperCamelCase : Optional[int] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def _lowercase ( self ):
        """Integration test: run the LayoutLMv3 image processor on a real DocVQA
        fixture and compare the OCR words/boxes against reference output from
        Tesseract 4.1.1, then check that ``apply_ocr=False`` still produces
        correctly shaped pixel values.

        NOTE(review): ``__SCREAMING_SNAKE_CASE`` below is an artifact of an
        automated rewrite; in the ``apply_ocr`` constructor call it presumably
        stood for ``False`` — confirm.
        """
        UpperCamelCase : List[str] = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        # Small document-QA fixture hosted for internal testing.
        UpperCamelCase : Dict = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
        UpperCamelCase : List[Any] = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
        UpperCamelCase : int = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        UpperCamelCase : Union[str, Any] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
        UpperCamelCase : str = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , __SCREAMING_SNAKE_CASE )
        self.assertListEqual(encoding.boxes , __SCREAMING_SNAKE_CASE )
        # with apply_OCR = False: only pixel values are produced, no words/boxes
        UpperCamelCase : Optional[Any] = LayoutLMvaImageProcessor(apply_ocr=__SCREAMING_SNAKE_CASE )
        UpperCamelCase : int = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 315
| 0
|
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class UpperCAmelCase_ ( _a):
    """Model-tester helper: builds a small SqueezeBertConfig plus dummy inputs
    and runs shape checks for each SqueezeBERT head.

    NOTE(review): this class has been mangled by an automated rewrite —
    (1) ``__init__`` repeats the single parameter name
    ``__SCREAMING_SNAKE_CASE`` for every argument, which is a SyntaxError in
    Python; (2) every other method is named ``_lowercase``, so later
    definitions shadow earlier ones (the originals were presumably
    ``prepare_config_and_inputs``, ``get_config``,
    ``create_and_check_squeezebert_*``, ...); (3) locals are all assigned to
    ``UpperCamelCase`` while later lines reference the original names
    (``input_ids``, ``input_mask``, ...). Restore the original identifiers
    before use — confirm against the upstream transformers test file.
    """
    def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=1 , ):
        """Store the test hyper-parameters (batch size, model dims, group sizes)."""
        UpperCamelCase : List[Any] = parent
        UpperCamelCase : List[str] = batch_size
        UpperCamelCase : Optional[int] = seq_length
        UpperCamelCase : Any = is_training
        UpperCamelCase : List[Any] = use_input_mask
        UpperCamelCase : Tuple = use_token_type_ids
        UpperCamelCase : Optional[Any] = use_labels
        UpperCamelCase : Tuple = vocab_size
        UpperCamelCase : str = hidden_size
        UpperCamelCase : List[Any] = num_hidden_layers
        UpperCamelCase : Optional[Any] = num_attention_heads
        UpperCamelCase : Tuple = intermediate_size
        UpperCamelCase : Any = hidden_act
        UpperCamelCase : str = hidden_dropout_prob
        UpperCamelCase : Optional[Any] = attention_probs_dropout_prob
        UpperCamelCase : int = max_position_embeddings
        UpperCamelCase : List[Any] = type_vocab_size
        UpperCamelCase : Optional[Any] = type_sequence_label_size
        UpperCamelCase : Union[str, Any] = initializer_range
        UpperCamelCase : List[Any] = num_labels
        UpperCamelCase : str = num_choices
        UpperCamelCase : List[Any] = scope
        # SqueezeBERT-specific grouped-convolution sizes:
        UpperCamelCase : Tuple = q_groups
        UpperCamelCase : List[Any] = k_groups
        UpperCamelCase : Any = v_groups
        UpperCamelCase : str = post_attention_groups
        UpperCamelCase : Dict = intermediate_groups
        UpperCamelCase : List[str] = output_groups
    def _lowercase ( self ):
        """Build random input ids, an optional attention mask and label tensors."""
        UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCamelCase : List[str] = None
        if self.use_input_mask:
            UpperCamelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCamelCase : List[str] = None
        UpperCamelCase : Dict = None
        UpperCamelCase : Dict = None
        if self.use_labels:
            UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            UpperCamelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
        # NOTE(review): ``get_config`` does not exist under that name here (see class note).
        UpperCamelCase : List[Any] = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def _lowercase ( self ):
        """Return a SqueezeBertConfig built from the stored hyper-parameters."""
        return SqueezeBertConfig(
            embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
        """Check the base model's last_hidden_state shape."""
        UpperCamelCase : Tuple = SqueezeBertModel(config=__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        UpperCamelCase : Optional[int] = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        UpperCamelCase : int = model(__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
        """Check the masked-LM head's logits shape."""
        UpperCamelCase : Any = SqueezeBertForMaskedLM(config=__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        UpperCamelCase : str = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
        """Check the question-answering head's start/end logits shapes."""
        UpperCamelCase : int = SqueezeBertForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        UpperCamelCase : str = model(
            __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
        """Check the sequence-classification head's logits shape."""
        UpperCamelCase : int = self.num_labels
        UpperCamelCase : Tuple = SqueezeBertForSequenceClassification(__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        UpperCamelCase : List[str] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
        """Check the token-classification head's logits shape."""
        UpperCamelCase : Optional[Any] = self.num_labels
        UpperCamelCase : Tuple = SqueezeBertForTokenClassification(config=__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        UpperCamelCase : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
        """Check the multiple-choice head's logits shape (inputs are expanded
        to (batch, num_choices, seq_len) before the forward pass)."""
        UpperCamelCase : Optional[int] = self.num_choices
        UpperCamelCase : Dict = SqueezeBertForMultipleChoice(config=__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        UpperCamelCase : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        UpperCamelCase : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        UpperCamelCase : Dict = model(
            __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def _lowercase ( self ):
        """Return (config, inputs_dict) for the common test mixin.

        NOTE(review): the parenthesised assignment below is NOT a tuple unpack —
        it binds the whole 6-tuple to one name, leaving ``input_ids`` /
        ``input_mask`` undefined; the original was presumably
        ``(config, input_ids, input_mask, sequence_labels, token_labels,
        choice_labels) = config_and_inputs`` — confirm and restore.
        """
        UpperCamelCase : Dict = self.prepare_config_and_inputs()
        (UpperCamelCase) : Dict = config_and_inputs
        UpperCamelCase : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a, _a, unittest.TestCase):
    """Common-mixin test suite for SqueezeBERT models.

    NOTE(review): artifacts of an automated rewrite — (1) the base list
    ``(_a, _a, ...)`` repeats the same (undefined here) name, which raises
    ``TypeError: duplicate base class`` (originals were presumably
    ``ModelTesterMixin, PipelineTesterMixin``); (2) every class attribute is
    named ``__UpperCamelCase`` so only the last assignment survives
    (originals: ``all_model_classes``, ``pipeline_model_mapping``,
    ``test_pruning``, ...); (3) every test method is named ``_lowercase`` and
    shadows the previous one. Restore the original identifiers before use.
    """
    __UpperCamelCase : Union[str, Any] = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    __UpperCamelCase : List[str] = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    __UpperCamelCase : List[str] = False
    __UpperCamelCase : int = True
    __UpperCamelCase : Any = False
    def _lowercase ( self ):
        """setUp: build the model tester and a ConfigTester.

        NOTE(review): ``SqueezeBertModelTester`` is not defined in this file
        (the tester class above was renamed ``UpperCAmelCase_``), and
        ``config_class=__SCREAMING_SNAKE_CASE`` mangles to an undefined name
        (presumably ``SqueezeBertConfig``) — confirm.
        """
        UpperCamelCase : Dict = SqueezeBertModelTester(self )
        UpperCamelCase : Dict = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , dim=37 )
    def _lowercase ( self ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    def _lowercase ( self ):
        """Shape-check the base model."""
        UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*__SCREAMING_SNAKE_CASE )
    def _lowercase ( self ):
        """Shape-check the masked-LM head."""
        UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*__SCREAMING_SNAKE_CASE )
    def _lowercase ( self ):
        """Shape-check the question-answering head."""
        UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*__SCREAMING_SNAKE_CASE )
    def _lowercase ( self ):
        """Shape-check the sequence-classification head."""
        UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*__SCREAMING_SNAKE_CASE )
    def _lowercase ( self ):
        """Shape-check the token-classification head."""
        UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*__SCREAMING_SNAKE_CASE )
    def _lowercase ( self ):
        """Shape-check the multiple-choice head."""
        UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*__SCREAMING_SNAKE_CASE )
    @slow
    def _lowercase ( self ):
        """Smoke-test loading the first published checkpoint from the hub."""
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase : int = SqueezeBertModel.from_pretrained(__SCREAMING_SNAKE_CASE )
            self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
    """Slow integration test against the published squeezebert-mnli checkpoint."""
    @slow
    def _lowercase ( self ):
        """Run one MNLI example through the pretrained classifier and compare
        the logits with a recorded reference (atol=1e-4).

        NOTE(review): the ``__SCREAMING_SNAKE_CASE`` arguments below mangle to
        an undefined attribute — each assignment above also reuses the name
        ``UpperCamelCase``, so the original locals (``model``, ``input_ids``,
        ``output``, ``expected_shape``, ``expected_tensor``) were collapsed by
        an automated rewrite; restore before running.
        """
        UpperCamelCase : Dict = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' )
        UpperCamelCase : List[Any] = torch.tensor([[1, 29_414, 232, 328, 740, 1_140, 12_695, 69, 13, 1_588, 2]] )
        UpperCamelCase : Union[str, Any] = model(__SCREAMING_SNAKE_CASE )[0]
        UpperCamelCase : Tuple = torch.Size((1, 3) )
        self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
        UpperCamelCase : List[Any] = torch.tensor([[0.6_401, -0.0_349, -0.6_041]] )
        self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 361
|
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def a ( SCREAMING_SNAKE_CASE_ : dict ):
    """Split a scikit-learn dataset mapping into a ``(features, target)`` pair.

    Args:
        SCREAMING_SNAKE_CASE_: a mapping with ``"data"`` and ``"target"`` keys
            (e.g. the Bunch returned by ``fetch_california_housing``).

    Returns:
        Tuple of (feature matrix, target vector).
    """
    # Bug fix: the original body referenced an undefined name ``data`` instead
    # of the function's own parameter, raising NameError on every call.
    return (SCREAMING_SNAKE_CASE_["data"], SCREAMING_SNAKE_CASE_["target"])
def a ( x_train: np.ndarray , y_train: np.ndarray , x_test: np.ndarray ) -> np.ndarray:
    """Fit an XGBoost regressor on the training split and predict the test split.

    Bug fix: the original signature repeated one parameter name three times,
    which is a SyntaxError in Python; the three distinct arguments are restored
    (callers pass them positionally, so this is backward compatible).

    Args:
        x_train: training feature matrix.
        y_train: training targets.
        x_test: feature matrix to predict.

    Returns:
        Column vector of predictions with shape ``(len(x_test), 1)``.
    """
    # Fixed seed keeps the regression output reproducible across runs.
    xgb = XGBRegressor(verbosity=0 , random_state=4_2 )
    xgb.fit(x_train , y_train )
    # Predict target for test data
    predictions = xgb.predict(x_test )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions
def a ( ):
    """Entry point: fetch California housing data, train XGBoost, print errors.

    NOTE(review): mangled by an automated rewrite — (1) the tuple-target
    annotated assignments (``UpperCamelCase , UpperCamelCase : Tuple = ...``)
    are SyntaxErrors (annotations allow only a single target); (2) the
    f-strings reference ``SCREAMING_SNAKE_CASE_``, undefined here (presumably
    the original ``y_test`` / ``predictions``); (3) the helper functions it
    should call (``data_handling``, ``xgboost``) are all defined as ``a`` in
    this file. Restore the original names before running.
    """
    UpperCamelCase : Tuple = fetch_california_housing()
    UpperCamelCase , UpperCamelCase : Tuple = data_handling(SCREAMING_SNAKE_CASE_ )
    UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[str] = train_test_split(
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , test_size=0.25 , random_state=1 )
    UpperCamelCase : Optional[Any] = xgboost(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    # Error printing
    print(F"""Mean Absolute Error : {mean_absolute_error(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}""" )
    print(F"""Mean Square Error : {mean_squared_error(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}""" )
# Script entry: run the doctests, then the pipeline.
# NOTE(review): ``main`` is not defined in this file (the entry function was
# renamed ``a`` by an automated rewrite) — this call raises NameError as-is.
if __name__ == "__main__":
    import doctest
    doctest.testmod(verbose=True)
    main()
| 315
| 0
|
import functools
def a ( worda: str , wordb: str ) -> int:
    """Return the Levenshtein edit distance between *worda* and *wordb*.

    Allowed operations are insertion, deletion and substitution, each of
    cost 1, computed top-down with memoisation.

    Bug fix: the original signature repeated the same parameter name twice
    (a SyntaxError in Python) and the inner helper referenced names that the
    collapsed locals no longer defined; distinct names are restored (callers
    pass positionally, so this is backward compatible).

    Args:
        worda: first word.
        wordb: second word.

    Returns:
        The minimum number of single-character edits turning worda into wordb.
    """
    len_worda = len(worda )
    len_wordb = len(wordb )

    @functools.cache
    def min_distance(indexa: int , indexb: int ) -> int:
        # if first word index is overflow - delete all remaining of the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all remaining of the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb] )  # 0 when current letters match
        return min(
            1 + min_distance(indexa + 1 , indexb ) ,          # delete from worda
            1 + min_distance(indexa , indexb + 1 ) ,          # insert into worda
            diff + min_distance(indexa + 1 , indexb + 1 ) ,   # substitute / keep
        )

    return min_distance(0 , 0 )
# Script entry: run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 362
|
# Configuration constants for converting doc pages into runnable notebooks.
# NOTE(review): all three constants were renamed ``__UpperCAmelCase`` by an
# automated rewrite, so later assignments shadow earlier ones and the
# reference to ``INSTALL_CONTENT`` below is undefined; the originals were
# presumably ``INSTALL_CONTENT``, ``notebook_first_cells`` and
# ``black_avoid_patterns`` — confirm and restore.
__UpperCAmelCase : str = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__UpperCAmelCase : Dict = [{"type": "code", "content": INSTALL_CONTENT}]
# Placeholder class names substituted into doc templates.
__UpperCAmelCase : Union[str, Any] = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 315
| 0
|
def a ( input_string: str , key: int ) -> str:
    """Encrypt *input_string* with the rail-fence (zigzag) cipher of height *key*.

    Bug fix: the original signature repeated one parameter name twice (a
    SyntaxError in Python) and the body referenced the collapsed names; the
    two distinct arguments are restored (callers pass positionally, so this
    is backward compatible).

    Args:
        input_string: the plaintext.
        key: number of rails (grid height).

    Returns:
        The ciphertext; the input is returned unchanged when the key is 1 or
        at least as long as the string.

    Raises:
        ValueError: if key is 0 or negative.
    """
    if key <= 0:
        raise ValueError('''Height of grid can\'t be 0 or negative''' )
    if key == 1 or len(input_string ) <= key:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key )]
    lowest = key - 1  # index of the bottom rail
    for position, character in enumerate(input_string ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append(character )
    # Read the rails top to bottom to produce the ciphertext.
    grid = [''''''.join(row ) for row in temp_grid]
    return ''''''.join(grid )
def a ( input_string: str , key: int ) -> str:
    """Decrypt a rail-fence (zigzag) ciphertext encrypted with height *key*.

    Bug fix: the original signature repeated one parameter name twice (a
    SyntaxError in Python) and the body referenced the collapsed names; the
    two distinct arguments are restored (callers pass positionally, so this
    is backward compatible).

    Args:
        input_string: the ciphertext.
        key: number of rails used for encryption.

    Returns:
        The recovered plaintext (the input itself when key is 1).

    Raises:
        ValueError: if key is 0 or negative.
    """
    if key <= 0:
        raise ValueError('''Height of grid can\'t be 0 or negative''' )
    if key == 1:
        return input_string

    lowest = key - 1  # index of the bottom rail
    # First pass: mark how many characters land on each rail.
    temp_grid: list[list[str]] = [[] for _ in range(key )]  # generates template
    for position in range(len(input_string ) ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append('''*''' )
    # Second pass: slice the ciphertext into rails of those lengths.
    grid = []
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row )]
        grid.append(list(splice ) )
        counter += len(splice )
    # Third pass: read the grid back in zigzag order.
    output_string = ''''''  # reads as zigzag
    for position in range(len(input_string ) ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0 )
    return output_string
def a ( SCREAMING_SNAKE_CASE_ : str ):
    """Brute-force a rail-fence ciphertext by decrypting with every key.

    Bug fix: the original loop computed each decryption but threw the result
    away and then returned an undefined name ``results``; the mapping is now
    built and returned.

    Args:
        SCREAMING_SNAKE_CASE_: the ciphertext.

    Returns:
        Dict mapping each candidate key (1 .. len-1) to its decryption.
    """
    results = {}
    for key_guess in range(1 , len(SCREAMING_SNAKE_CASE_ ) ):  # tries every key
        # NOTE(review): ``decrypt`` is not defined under that name in this
        # module (the decrypt function was renamed ``a``) — confirm/restore.
        results[key_guess] = decrypt(SCREAMING_SNAKE_CASE_ , key_guess )
    return results
# Script entry: run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 363
|
import collections
import os
import re
from pathlib import Path
# Regexes used by the init-consistency checker below.
# NOTE(review): every constant here was renamed ``__UpperCAmelCase`` by an
# automated rewrite, so each assignment shadows the previous one and the
# functions below reference the original names (``_re_backend``,
# ``_re_test_backend``, ``_re_import`` , ...), which are now undefined —
# restore the original names before use.
__UpperCAmelCase : List[str] = "src/transformers"
# Matches is_xxx_available()
__UpperCAmelCase : int = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
__UpperCAmelCase : Optional[int] = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__UpperCAmelCase : List[Any] = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
__UpperCAmelCase : List[Any] = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
__UpperCAmelCase : str = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__UpperCAmelCase : Union[str, Any] = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
__UpperCAmelCase : Dict = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
__UpperCAmelCase : str = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
__UpperCAmelCase : str = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
__UpperCAmelCase : Any = re.compile(r"^\s*try:")
# Catches a line with else:
__UpperCAmelCase : List[Any] = re.compile(r"^\s*else:")
def a ( SCREAMING_SNAKE_CASE_ : Dict ):
    """Extract the backend name(s) from an ``if not is_xxx_available()`` line.

    Args:
        SCREAMING_SNAKE_CASE_: one source line from an ``__init__.py``.

    Returns:
        None when the line is not a backend check; otherwise the sorted
        backend names joined with "_and_" (e.g. ``"tf_and_torch"``).

    NOTE(review): ``_re_test_backend`` / ``_re_backend`` are undefined under
    those names in this module (the compiled-regex constants above were all
    renamed by an automated rewrite) — restore them for this to run.
    """
    if _re_test_backend.search(SCREAMING_SNAKE_CASE_ ) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE_ )]
    backends.sort()
    # Bug fix: the original joined the *input line* ("_and_".join(line)),
    # interleaving "_and_" between every character, instead of joining the
    # extracted backend names.
    return "_and_".join(backends )
def a ( SCREAMING_SNAKE_CASE_ : Tuple ):
    """Parse an ``__init__.py`` and return the objects declared in its lazy
    ``_import_structure`` dict and in its ``TYPE_CHECKING`` block, each as a
    dict keyed by backend name (``"none"`` for backend-free objects), or
    ``None`` for a traditional (non-lazy) init.

    NOTE(review): mangled by an automated rewrite — each ``UpperCamelCase``
    assignment collapses a distinct local (``lines``, ``line_index``,
    ``objects``, ``line``, ``backend``, ``import_dict_objects``,
    ``type_hint_objects``, ...) which later lines still reference by the
    original name, and several helpers/regex constants (``find_backend``,
    ``_re_import`` etc.) are undefined under those names. The control flow is
    kept byte-identical; restore the original identifiers before use.
    """
    with open(SCREAMING_SNAKE_CASE_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        UpperCamelCase : Tuple = f.readlines()
    UpperCamelCase : Tuple = 0
    # Skip ahead to the start of the lazy-import structure.
    while line_index < len(SCREAMING_SNAKE_CASE_ ) and not lines[line_index].startswith('''_import_structure = {''' ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(SCREAMING_SNAKE_CASE_ ):
        return None
    # First grab the objects without a specific backend in _import_structure
    UpperCamelCase : List[Any] = []
    while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
        UpperCamelCase : Optional[int] = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE_ ):
            UpperCamelCase : Optional[int] = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE_ ).groups()[0]
            UpperCamelCase : str = re.findall(R'''\[([^\]]+)\]''' , SCREAMING_SNAKE_CASE_ )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
            line_index += 1
            continue
        UpperCamelCase : List[Any] = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE_ )
        if single_line_import_search is not None:
            UpperCamelCase : List[str] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(SCREAMING_SNAKE_CASE_ ) > 0]
            objects.extend(SCREAMING_SNAKE_CASE_ )
        elif line.startswith(''' ''' * 8 + '''"''' ):
            # Strip the 8-space indent and the trailing `",\n`.
            objects.append(line[9:-3] )
        line_index += 1
    UpperCamelCase : Dict = {'''none''': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        UpperCamelCase : Dict = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            UpperCamelCase : Optional[Any] = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            UpperCamelCase : str = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
                UpperCamelCase : str = lines[line_index]
                if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE_ ) is not None:
                    objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE_ ).groups()[0] )
                elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE_ ) is not None:
                    UpperCamelCase : Union[str, Any] = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE_ ).groups()[0].split(''', ''' )
                    UpperCamelCase : List[Any] = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE_ ) > 0]
                    objects.extend(SCREAMING_SNAKE_CASE_ )
                elif _re_between_brackets.search(SCREAMING_SNAKE_CASE_ ) is not None:
                    UpperCamelCase : str = _re_between_brackets.search(SCREAMING_SNAKE_CASE_ ).groups()[0].split(''', ''' )
                    UpperCamelCase : Dict = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE_ ) > 0]
                    objects.extend(SCREAMING_SNAKE_CASE_ )
                elif _re_quote_object.search(SCREAMING_SNAKE_CASE_ ) is not None:
                    objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE_ ).groups()[0] )
                elif line.startswith(''' ''' * 8 + '''"''' ):
                    objects.append(line[9:-3] )
                elif line.startswith(''' ''' * 1_2 + '''"''' ):
                    objects.append(line[1_3:-3] )
                line_index += 1
            UpperCamelCase : Tuple = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    UpperCamelCase : int = []
    while (
        line_index < len(SCREAMING_SNAKE_CASE_ )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith('''else''' )
    ):
        UpperCamelCase : Tuple = lines[line_index]
        UpperCamelCase : Any = _re_import.search(SCREAMING_SNAKE_CASE_ )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
        elif line.startswith(''' ''' * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    UpperCamelCase : Any = {'''none''': objects}
    # Let's continue with backend-specific objects
    while line_index < len(SCREAMING_SNAKE_CASE_ ):
        # If the line is an if is_backend_available, we grab all objects associated.
        UpperCamelCase : Optional[Any] = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            UpperCamelCase : Dict = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            UpperCamelCase : Dict = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
                UpperCamelCase : Optional[Any] = lines[line_index]
                UpperCamelCase : str = _re_import.search(SCREAMING_SNAKE_CASE_ )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
                elif line.startswith(''' ''' * 1_2 ):
                    objects.append(line[1_2:-2] )
                line_index += 1
            UpperCamelCase : str = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def a ( import_dict_objects , type_hint_objects ):
    """Compare the objects declared in ``_import_structure`` against those in
    the ``TYPE_CHECKING`` block and return a list of human-readable errors.

    Bug fix: the original signature repeated one parameter name twice (a
    SyntaxError in Python) while the body already referenced the two distinct
    names restored here; callers pass positionally, so this is backward
    compatible.

    Args:
        import_dict_objects: backend -> object names from ``_import_structure``.
        type_hint_objects: backend -> object names from ``TYPE_CHECKING``.

    Returns:
        List of error strings; empty when both sides agree.
    """
    def find_duplicates(seq ):
        # Names that appear more than once on one side.
        return [k for k, v in collections.Counter(seq ).items() if v > 1]

    # Both sides must declare exactly the same backends, in the same order.
    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
        # Compare as sets so ordering differences are not reported as errors.
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = '''base imports''' if key == '''none''' else F"""{key} backend"""
            errors.append(F"""Differences for {name}:""" )
            for obj in type_hint_objects[key]:
                if obj not in import_dict_objects[key]:
                    errors.append(F"""  {obj} in TYPE_HINT but not in _import_structure.""" )
            for obj in import_dict_objects[key]:
                if obj not in type_hint_objects[key]:
                    errors.append(F"""  {obj} in _import_structure but not in TYPE_HINT.""" )
    return errors
def a():
    """Walk every ``__init__.py`` under the source tree and raise ``ValueError``
    if any init's ``_import_structure`` half disagrees with its
    ``TYPE_CHECKING`` half.

    Fix: locals were all bound to one shared name (so the failure list was
    clobbered on every iteration) and the walk root was an undefined
    identifier.
    """
    failures = []
    # NOTE(review): the walk-root constant was garbled in this copy; this kind
    # of check script conventionally walks the package source root — confirm
    # the constant's actual name at the top of the file.
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    # Prefix the first error with the offending file for context.
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def a():
    """Return the dotted names of all submodules found on disk.

    Package folders that actually contain Python files are reported, as are
    top-level ``.py`` files other than ``__init__.py``; private (underscore)
    folders are pruned from the walk.

    Fix: locals were all bound to one shared name and the walk root was an
    undefined identifier.
    """
    submodules = []
    # NOTE(review): walk root garbled in this copy — confirm the constant name.
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules (removing from `directories` prunes the
            # walk; note this mutates the list being iterated, as the original
            # did).
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            # Only keep top-level modules; deeper files belong to a package
            # folder collected above.
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
# Submodules deliberately absent from the main init's `_import_structure`.
# Fix: this list was assigned to a mismatched throwaway name, while the
# registration check below reads it as IGNORE_SUBMODULES.
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def a():
    """Check every submodule on disk is registered in the main
    ``_import_structure``; raise ``ValueError`` listing any that are not.

    Fix: locals were all bound to one shared name and the source-root constant
    was an undefined identifier.
    """
    from transformers.utils import direct_transformers_import

    # NOTE(review): source-root constant garbled in this copy — confirm name.
    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
        import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registed in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
    # NOTE(review): these names do not match the function definitions above
    # (which are all bound to ``a`` in this copy); running this module as a
    # script would raise NameError — confirm the intended entry points.
    check_all_inits()
    check_submodules()
| 315
| 0
|
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class UpperCAmelCase_(IterableDataset):
    """An iterable dataset yielding 0, 1, 2, ... that stops at a random point.

    After each yielded item the stream ends with probability ``p_stop``; it
    always ends after ``max_length`` items.

    Fixes: the constructor declared the same parameter name twice (a
    SyntaxError), the attributes the iterator reads (``p_stop`` /
    ``max_length``) were never actually set, and the base class name was
    undefined — ``IterableDataset`` is what this module imports.
    """

    def __init__(self, p_stop=0.01, max_length=1_000):
        self.p_stop = p_stop  # per-item probability of ending the stream
        self.max_length = max_length  # hard cap on the number of items

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            # Draw after yielding, so at least one item is always produced.
            stop = random.random() < self.p_stop
class UpperCAmelCase_(unittest.TestCase):
    """Tests for the batch-sampler / dataloader sharding utilities.

    Fixes applied to this copy: every method was named ``_lowercase`` (later
    definitions shadowed earlier ones, while the bodies call the helpers
    ``check_batch_sampler_shards`` / ``check_iterable_dataset_shards`` by
    name), several signatures declared the same parameter name repeatedly (a
    SyntaxError), locals were bound to throwaway names, and the ``drop_last``
    flags were garbled — restored from the expected shard lists (a
    padded/wrapped expectation implies ``drop_last=False``, a truncated one
    ``drop_last=True``).
    """

    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        # Build one shard per (simulated) process and compare against the
        # expected per-process batch lists.
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            # The length each shard reports (without iterating) must match the
            # expected number of batches.
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)

    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)

    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])

    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        # Interleave the shard outputs back into global order.
        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            # Without drop_last the shards wrap around the dataset, so extend
            # the reference to cover everything observed.
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])

    def test_iterable_dataset_shard(self):
        seed = 42
        # NOTE(review): the random iterable dataset class above is bound to an
        # obfuscated name in this copy — confirm it is exported as
        # RandomIterableDataset.
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
| 364
|
def a(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Return True if ``input_str`` is a pangram (contains every letter a-z).

    Fixes: the parameter was declared under a different name than the one the
    body read (NameError), and the set of seen letters was bound to a
    throwaway name so the final test never saw it.
    """
    frequency = set()
    # Whitespace is irrelevant to pangram-ness.
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26
def a(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Pangram check using a fixed 26-slot flag table instead of a set.

    Fixes: both branches overwrote a single throwaway local instead of setting
    the correct slot in the flag list, and the parameter name did not match
    the one the body read.
    """
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - 97] = True  # ord("a") == 97
        elif char.isupper():
            flag[ord(char) - 65] = True  # ord("A") == 65
    return all(flag)
def a(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Pangram check via a set comprehension over the lowercased letters.

    Fix: the parameter was declared under a different name than the one the
    body read (NameError).
    """
    return len({char for char in input_str.lower() if char.isalpha()}) == 26
def a() -> None:
    """Benchmark the three pangram implementations with ``timeit``.

    Fix: the setup string was assigned to a throwaway name while the calls
    passed an undefined identifier.
    """
    from timeit import timeit

    # NOTE(review): the setup string assumes the functions are exported from
    # __main__ under their original names; in this copy the definitions above
    # are all bound to ``a`` — confirm before running.
    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): ``benchmark`` is not defined above in this copy (the
    # benchmark function is bound to ``a``) — running as a script would raise
    # NameError; confirm the intended entry point.
    benchmark()
| 315
| 0
|
import operator as op
# NOTE(review): every constant in this section is assigned to the same name
# ``__UpperCAmelCase``, so each assignment clobbers the previous one and only
# the final value survives — each literal was clearly meant to have its own
# constant name (checkpoint file names, SageMaker/FSDP/DeepSpeed settings,
# torch.distributed launch args, ...). Values are kept byte-identical here;
# confirm the intended names before relying on this module.
# Checkpoint artifact file names.
__UpperCAmelCase : List[str] = "scaler.pt"
__UpperCAmelCase : Any = "pytorch_model"
__UpperCAmelCase : Any = "random_states"
__UpperCAmelCase : List[Any] = "optimizer"
__UpperCAmelCase : Optional[int] = "scheduler"
# Full / sharded weight file names (PyTorch and safetensors formats).
__UpperCAmelCase : str = "pytorch_model.bin"
__UpperCAmelCase : Any = "pytorch_model.bin.index.json"
__UpperCAmelCase : Any = "model.safetensors"
__UpperCAmelCase : List[Any] = "model.safetensors.index.json"
# SageMaker version pins and the instance types supporting data parallelism.
__UpperCAmelCase : Any = "1.10.2"
__UpperCAmelCase : Any = "py38"
__UpperCAmelCase : Union[str, Any] = "4.17.0"
__UpperCAmelCase : Tuple = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
# FSDP configuration option lists and minimum torch version.
__UpperCAmelCase : int = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
__UpperCAmelCase : Optional[int] = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
__UpperCAmelCase : Any = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
__UpperCAmelCase : List[Any] = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
__UpperCAmelCase : List[str] = "2.0.1"
# DeepSpeed multi-node launchers and torch dynamo compilation modes.
__UpperCAmelCase : Optional[int] = ["pdsh", "standard", "openmpi", "mvapich"]
__UpperCAmelCase : Optional[Any] = ["default", "reduce-overhead", "max-autotune"]
# Mapping from comparison-operator strings to the `operator` functions.
__UpperCAmelCase : str = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
__UpperCAmelCase : Tuple = [
    "nnodes",
    "nproc_per_node",
    "rdzv_backend",
    "rdzv_endpoint",
    "rdzv_id",
    "rdzv_conf",
    "standalone",
    "max_restarts",
    "monitor_interval",
    "start_method",
    "role",
    "module",
    "m",
    "no_python",
    "run_path",
    "log_dir",
    "r",
    "redirects",
    "t",
    "tee",
    "node_rank",
    "master_addr",
    "master_port",
]
# Distributed-type names (CUDA-capable vs. non-CUDA backends).
__UpperCAmelCase : List[str] = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
__UpperCAmelCase : List[str] = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
| 365
|
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)

# Fix: the root logger was bound to a mismatched throwaway name, while the
# test class below reads it as ``logger`` in its setup.
logger = logging.getLogger()
def a():
    """Return the value passed to the ``-f`` command-line flag (pytest passes
    the test file path this way).

    Fix: the parser was bound to a throwaway name while ``add_argument`` and
    ``parse_args`` were called on the intended ``parser``/``args`` names
    (NameError).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class UpperCAmelCase_(TestCasePlus):
    """End-to-end tests for the DeeBERT GLUE example script.

    Fixes: all methods shared the name ``_lowercase`` (later definitions
    shadowed earlier ones), one signature declared the same parameter twice
    (a SyntaxError), the ``patch.object`` target was an undefined identifier,
    and the base class name was undefined — ``TestCasePlus`` is what this
    module imports.
    """

    def setup(self) -> None:
        # Mirror the script's log output on stdout so failures are visible.
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                # Every reported metric must clear the quality bar.
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
            --model_type roberta
            --model_name_or_path roberta-base
            --task_name MRPC
            --do_train
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --max_seq_length 128
            --per_gpu_eval_batch_size=1
            --per_gpu_train_batch_size=8
            --learning_rate 2e-4
            --num_train_epochs 3
            --overwrite_output_dir
            --seed 42
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --save_steps 0
            --overwrite_cache
            --eval_after_first_stage
            """.split()
        self.run_and_check(train_args)

        eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --eval_each_highway
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --early_exit_entropy 0.1
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(entropy_eval_args)
| 315
| 0
|
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
# Fix: bind the module logger to its conventional name instead of a
# throwaway identifier.
logger = logging.get_logger(__name__)
class UpperCAmelCase_(ImageGPTImageProcessor):
    """Deprecated alias of :class:`ImageGPTImageProcessor` that emits a
    deprecation warning on construction.

    Fixes: ``*args``/``**kwargs`` were declared under one duplicated name (a
    SyntaxError), the warning category was an undefined identifier, and the
    base class name was undefined — ``ImageGPTImageProcessor`` is what this
    module imports and the warning tells users to switch to.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 366
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Fix: the logger and the archive map were both assigned to the same
# throwaway name (the second assignment clobbered the first); restored the
# conventional module-level names.
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}
class UpperCAmelCase_(PretrainedConfig):
    """Configuration for an I-BERT (integer-only BERT) model.

    Holds the hyper-parameters used to instantiate the model. Parameter names
    were reconstructed from the canonical I-BERT configuration: the original
    block declared every argument with the same placeholder name, which is a
    SyntaxError, and assigned them to a throwaway local instead of ``self``.
    """

    # `PretrainedConfig` machinery dispatches on this identifier.
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,      # run the model in integer-only (quantized) mode when True
        force_dequant="none",  # which ops to force back to floating point
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class UpperCAmelCase_(OnnxConfig):
    """ONNX export configuration for I-BERT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis names for each exported input tensor.

        `OnnxConfig` requires this property to be named ``inputs``; the
        original block defined it under an obfuscated placeholder name.
        Multiple-choice tasks carry an extra `choice` axis.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ]
        )
| 315
| 0
|
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    """Builds a tiny `BeitConfig` plus random inputs for the Flax BEiT tests.

    The class is renamed from an obfuscated placeholder: the test class below
    instantiates it as ``FlaxBeitModelTester(self)``, and calls the methods by
    the names restored here.
    """

    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,  # kept for signature compatibility; not stored below
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = self.num_patches + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for one small forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, pixel_values, labels

    def create_and_check_model(self, config, pixel_values, labels):
        """Base model: last hidden state must be (batch, seq, hidden)."""
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        """Masked image modeling head: logits exclude the [CLS] position."""
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Classification head, including a greyscale (1-channel) variant."""
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) — the shape the common mixin expects.

        The obfuscated original referenced undefined names (`config_and_inputs`,
        `pixel_values`); the tuple unpacking is restored here.
        """
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_flax
class UpperCAmelCase_(FlaxModelTesterMixin, unittest.TestCase):
    """Flax BEiT model tests.

    Method names are restored to ``test_*`` / ``setUp``: the original block
    defined every method under the same placeholder name, so the later
    definitions shadowed the earlier ones and unittest discovered none of them.
    The base class is the `FlaxModelTesterMixin` imported at the top of this
    file (the original referenced an undefined ``_a``).
    """

    # `FlaxModelTesterMixin` iterates this attribute; it must use this exact name.
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp(self):
        self.model_tester = FlaxBeitModelTester(self)
        # has_text_modality=False: BEiT is vision-only, so the config tester
        # must not look for a vocab-related text config.
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_forward_signature(self):
        """Every model's __call__ must take `pixel_values` first."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        """jit-compiled and eager outputs must agree in structure and shape."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest('''JIT Enabled'''):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest('''JIT Disabled'''):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """Smoke test: each architecture loads the released checkpoint and runs."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''microsoft/beit-base-patch16-224''')
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
def prepare_img():
    """Load the standard COCO test fixture image used by the integration tests.

    Restored to the name the integration tests below actually call;
    the obfuscated original was defined as ``a``.
    """
    return Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')


# Backward-compatible alias for the obfuscated original name.
a = prepare_img
@require_vision
@require_flax
class UpperCAmelCase_(unittest.TestCase):
    """Slow integration tests for Flax BEiT against released checkpoints.

    Method names restored: the obfuscated block defined the cached property and
    all three tests under one placeholder name, shadowing each other — and the
    tests reference ``self.default_image_processor``, which therefore must be
    the property's real name.
    """

    @cached_property
    def default_image_processor(self):
        # None when vision deps are missing; the @require_vision decorator
        # skips the tests in that case anyway.
        return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''') if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''')
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='''np''').pixel_values

        # prepare bool_masked_pos: mask every one of the 196 patches
        bool_masked_pos = np.ones((1, 196), dtype=bool)

        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 196, 8_192)
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''np''')

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits against reference values from the PyTorch model
        expected_shape = (1, 1_000)
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))
        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''np''')

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 21_841)
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))
        expected_class_idx = 2_396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
| 367
|
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
__UpperCAmelCase : int = logging.get_logger(__name__)
class UpperCAmelCase_(FeatureExtractionMixin):
    """Feature extractor that turns raw HTML into text nodes and their xpaths.

    Uses BeautifulSoup (bs4) to walk the DOM. Internal methods are restored to
    the names this class itself calls (`xpath_soup`, `get_three_from_single`,
    `construct_xpath`); the obfuscated block defined them all as `_lowercase`,
    and `xpath_soup` declared duplicate parameter names (a SyntaxError).
    """

    def __init__(self, **kwargs):
        requires_backends(self, ['''bs4'''])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        """Return (tag_names, subscripts) describing `element`'s path from the root.

        A subscript of 0 means the tag is its parent's only child of that name;
        otherwise it is the 1-based position among same-named siblings.
        """
        xpath_tags = []
        xpath_subscripts = []
        # NavigableStrings have no tag name; start from their enclosing tag.
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            # recursive=False: only direct children count as siblings.
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        # Paths were collected leaf-to-root; present them root-to-leaf.
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        """Parse one HTML string into (doc_strings, xpath_tag_seqs, xpath_sub_seqs)."""
        html_code = BeautifulSoup(html_string, '''html.parser''')

        all_doc_strings = []
        stringaxtag_seq = []
        stringaxsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bsa.element.NavigableString:
                if type(element.parent) != bsa.element.Tag:
                    continue
                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue
                all_doc_strings.append(text_in_this_tag)
                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                stringaxtag_seq.append(xpath_tags)
                stringaxsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(stringaxtag_seq):
            raise ValueError('''Number of doc strings and xtags does not correspond''')
        if len(all_doc_strings) != len(stringaxsubs_seq):
            raise ValueError('''Number of doc strings and xsubs does not correspond''')

        return all_doc_strings, stringaxtag_seq, stringaxsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        """Render an xpath string such as ``/html/body/div/li[1]`` from tag/subscript lists."""
        xpath = ''''''
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"""/{tagname}"""
            if subs != 0:
                xpath += f"""[{subs}]"""
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        """Extract nodes and xpaths from a single HTML string or a batch of them.

        Returns a `BatchFeature` with keys ``nodes`` and ``xpaths`` (one list
        per input example).
        """
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                '''HTML strings must of type `str`, `List[str]` (batch of examples), '''
                f"""but is of type {type(html_strings)}."""
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, stringaxtag_seq, stringaxsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, stringaxtag_seq, stringaxsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {'''nodes''': nodes, '''xpaths''': xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)
        return encoded_inputs
| 315
| 0
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class UpperCAmelCase_(SchedulerMixin, ConfigMixin):
    """Variance-preserving (VP) SDE scheduler (Song et al., score_sde).

    Bases restored to the `SchedulerMixin`/`ConfigMixin` imported above, and
    method names restored to the diffusers scheduler API (`set_timesteps`,
    `step_pred`); the obfuscated `__init__` declared duplicate parameter names.
    """

    # Solver order of the scheduler (diffusers pipelines read this attribute).
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2_000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        # @register_to_config stores the arguments on `self.config`;
        # runtime state is initialised lazily by `set_timesteps`.
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device=None):
        """Create the (descending) continuous timestep schedule in (eps, 1]."""
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        """One reverse-SDE predictor step.

        Returns ``(x, x_mean)``: the noisy next sample and its noise-free mean.
        """
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler'''
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        # Broadcast std over the trailing sample dimensions.
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
| 368
|
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
# `logger` and `DEFAULT_DEVICE` are referenced by name throughout this script;
# the obfuscated source bound both to the same throwaway variable.
logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples,
    out_file,
    model_name,
    batch_size=8,
    device=DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
):
    """Run `model.generate` over `examples` in chunks, writing one hypothesis per line.

    Restored from obfuscated source: the function is called by this name in
    `run_generate` below, and the original signature declared duplicate
    parameter names (a SyntaxError).

    Returns a dict with `n_obs`, `runtime` (seconds) and `seconds_per_sample`.
    """
    fout = Path(out_file).open('''w''', encoding='''utf-8''')
    model_name = str(model_name)
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, '''prefix''', '''''') or ''''''
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors='''pt''', truncation=True, padding='''longest''').to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + '''\n''')
        # Flush per chunk so partial results survive an interruption.
        fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    """Return the current local time as ``YYYY-MM-DD HH:MM:SS``.

    Restored to the name used by `run_generate`'s ``--info`` default below;
    the obfuscated original was defined as ``a`` (shadowed by later defs).
    """
    return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''')
def run_generate(verbose=True):
    """CLI entry point: evaluate a seq2seq model on a file of source lines.

    Parses CLI args, generates hypotheses with
    `generate_summaries_or_translations`, then (optionally) scores them with
    ROUGE or BLEU. Unrecognized ``--key=value`` args are forwarded to
    ``model.generate``. Returns the metrics dict ({} when no reference file).

    Note: the obfuscated original read ``args.fpaa``, but
    ``add_argument('--fp16')`` creates ``args.fp16`` — fixed here.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('''model_name''', type=str, help='''like facebook/bart-large-cnn,t5-base, etc.''')
    parser.add_argument('''input_path''', type=str, help='''like cnn_dm/test.source''')
    parser.add_argument('''save_path''', type=str, help='''where to save summaries''')
    parser.add_argument('''--reference_path''', type=str, required=False, help='''like cnn_dm/test.target''')
    parser.add_argument('''--score_path''', type=str, required=False, default='''metrics.json''', help='''where to save metrics''')
    parser.add_argument('''--device''', type=str, required=False, default=DEFAULT_DEVICE, help='''cuda, cuda:1, cpu etc.''')
    parser.add_argument(
        '''--prefix''', type=str, required=False, default=None, help='''will be added to the begininng of src examples''')
    parser.add_argument('''--task''', type=str, default='''summarization''', help='''used for task_specific_params + metrics''')
    parser.add_argument('''--bs''', type=int, default=8, required=False, help='''batch size''')
    parser.add_argument(
        '''--n_obs''', type=int, default=-1, required=False, help='''How many observations. Defaults to all.''')
    parser.add_argument('''--fp16''', action='''store_true''')
    parser.add_argument('''--dump-args''', action='''store_true''', help='''print the custom hparams with the results''')
    parser.add_argument(
        '''--info''', nargs='''?''', type=str, const=datetime_now(), help=(
            '''use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g.'''
            ''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"""parsed the following generate kwargs: {parsed_args}""")
    # T5 checkpoints expect a leading space on each example.
    examples = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError('''Can't mix --fp16 and --device cpu''')
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if '''translation''' in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, '''w'''))

    return scores
if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    # NOTE: any extra --key=value flags are forwarded to model.generate (see run_generate).
    run_generate(verbose=True)
| 315
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import bootstrap for the mgp_str subpackage. The obfuscated source
# bound this mapping to a throwaway name although `_LazyModule` below is
# passed `_import_structure`, and it never registered the torch-only module.
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Only expose the modeling module when torch is installed.
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 369
|
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : int = ["image_processor", "tokenizer"]
__UpperCamelCase : List[str] = "AutoImageProcessor"
__UpperCamelCase : Optional[Any] = "AutoTokenizer"
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __SCREAMING_SNAKE_CASE , )
UpperCamelCase : Any = kwargs.pop('''feature_extractor''' )
UpperCamelCase : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = self.image_processor
UpperCamelCase : int = False
def __call__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = kwargs.pop('''images''' , __SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = kwargs.pop('''text''' , __SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > 0:
UpperCamelCase : Union[str, Any] = args[0]
UpperCamelCase : str = args[1:]
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
UpperCamelCase : List[str] = self.image_processor(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if text is not None:
UpperCamelCase : Optional[Any] = self.tokenizer(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if text is None:
return inputs
elif images is None:
return encodings
else:
UpperCamelCase : List[str] = encodings['''input_ids''']
return inputs
def _lowercase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _lowercase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@contextmanager
def _lowercase ( self ):
"""simple docstring"""
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your images inputs, or in a separate call.''' )
UpperCamelCase : Any = True
UpperCamelCase : int = self.tokenizer
yield
UpperCamelCase : List[Any] = self.image_processor
UpperCamelCase : Tuple = False
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
if added_vocab is None:
UpperCamelCase : str = self.tokenizer.get_added_vocab()
UpperCamelCase : int = {}
while tokens:
UpperCamelCase : Dict = re.search(R'''<s_(.*?)>''' , __SCREAMING_SNAKE_CASE , re.IGNORECASE )
if start_token is None:
break
UpperCamelCase : List[str] = start_token.group(1 )
UpperCamelCase : Dict = re.search(Rf"""</s_{key}>""" , __SCREAMING_SNAKE_CASE , re.IGNORECASE )
UpperCamelCase : Any = start_token.group()
if end_token is None:
UpperCamelCase : Optional[int] = tokens.replace(__SCREAMING_SNAKE_CASE , '''''' )
else:
UpperCamelCase : Dict = end_token.group()
UpperCamelCase : int = re.escape(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = re.escape(__SCREAMING_SNAKE_CASE )
UpperCamelCase : str = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""" , __SCREAMING_SNAKE_CASE , re.IGNORECASE )
if content is not None:
UpperCamelCase : Dict = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
UpperCamelCase : Tuple = self.tokenajson(__SCREAMING_SNAKE_CASE , is_inner_value=__SCREAMING_SNAKE_CASE , added_vocab=__SCREAMING_SNAKE_CASE )
if value:
if len(__SCREAMING_SNAKE_CASE ) == 1:
UpperCamelCase : str = value[0]
UpperCamelCase : str = value
else: # leaf nodes
UpperCamelCase : Optional[int] = []
for leaf in content.split(R'''<sep/>''' ):
UpperCamelCase : Optional[int] = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
UpperCamelCase : int = leaf[1:-2] # for categorical special tokens
output[key].append(__SCREAMING_SNAKE_CASE )
if len(output[key] ) == 1:
UpperCamelCase : Tuple = output[key][0]
UpperCamelCase : List[Any] = tokens[tokens.find(__SCREAMING_SNAKE_CASE ) + len(__SCREAMING_SNAKE_CASE ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=__SCREAMING_SNAKE_CASE , added_vocab=__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def _lowercase ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __SCREAMING_SNAKE_CASE , )
return self.image_processor_class
@property
def _lowercase ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __SCREAMING_SNAKE_CASE , )
return self.image_processor
| 315
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def a ( SCREAMING_SNAKE_CASE_ : str ):
    """
    Print the first-order entropy, the second-order entropy, and their
    difference for the given text (each rounded to one decimal).

    NOTE(review): relies on `analyze_text` being defined in this module.
    """
    # FIX: the original collapsed the tuple unpack into a single junk name.
    single_char_strings, two_char_strings = analyze_text(SCREAMING_SNAKE_CASE_)
    my_alphas = list(' ' + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            # FIX: `math.loga` does not exist; the entropy formula uses log base 2.
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
    # print entropy
    print(f"""{round(-1 * my_fir_sum ):.1f}""")
    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    # FIX: the original used the same variable for both loops, so only
    # doubled characters ("aa", "bb", ...) were ever tested.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(f"""{round(-1 * my_sec_sum ):.1f}""")
    # print the difference between them
    print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""")
def a ( SCREAMING_SNAKE_CASE_ : str ):
    """Count character frequencies in the text.

    Returns a pair of Counters: (single characters, adjacent two-character
    sequences). A leading " " is prepended to the first pair so the text
    behaves as if it started after a space.
    """
    singles = Counter()  # type: ignore
    pairs = Counter()  # type: ignore
    # The pairwise walk below stops one short, so count the last char here.
    singles[SCREAMING_SNAKE_CASE_[-1]] += 1
    # first case when we have space at start.
    pairs[" " + SCREAMING_SNAKE_CASE_[0]] += 1
    for left, right in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_[1:]):
        singles[left] += 1
        pairs[left + right] += 1
    return singles, pairs
def a ( ):
    """Entry point: run this module's doctests."""
    import doctest

    doctest.testmod()
    # Sample driver, kept for reference:
    # text = ("Had repulsive dashwoods suspicion sincerity but advantage now him. ...")
    # calculate_prob(text)


if __name__ == "__main__":
    main()
| 370
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: submodule name -> list of public names it exports.
# FIX: the original bound this dict (and the torch-only extension and the
# _LazyModule instance) to throwaway names while line-referencing
# `_import_structure`; the conventional lazy-module wiring is restored.
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # The modeling files require torch, so register them only when available.
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 315
| 0
|
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    """Return True when the sentence contains every letter a-z at least once.

    Collects each distinct case-folded letter and compares against 26.
    """
    seen = set()
    for symbol in SCREAMING_SNAKE_CASE_:
        lowered = symbol.lower()
        if "a" <= lowered <= "z":
            seen.add(lowered)
    return len(seen) == 2_6
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    """Flag-array pangram check: mark each ASCII letter seen, then require all 26.

    FIX: the original assigned `True` to a throwaway local instead of
    indexing the flag array, and returned `all(input_str)` (always True for
    any input) instead of `all(flag)`.

    NOTE(review): non-ASCII alphabetic characters (e.g. "é") would index out
    of range, matching the straightforward reading of the original intent —
    callers should pass ASCII text.
    """
    flag = [False] * 2_6
    for char in SCREAMING_SNAKE_CASE_:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    """Pangram check: 26 distinct alphabetic characters after case-folding."""
    return len(set(filter(str.isalpha, SCREAMING_SNAKE_CASE_.lower()))) == 2_6
def a ( ):
    """Time the three pangram implementations against each other.

    NOTE(review): the timeit setup imports `is_pangram*` from __main__ —
    those are the intended names of the functions defined above; confirm
    the module's public names before running.
    """
    from timeit import timeit

    setup_stmt = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    for call in ("is_pangram()", "is_pangram_faster()", "is_pangram_fastest()"):
        print(timeit(call, setup=setup_stmt))


# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 371
|
def a ( SCREAMING_SNAKE_CASE_ : int = 5_0 ):
    """Count the ways to fill a row of the given length with blocks of
    length >= 3 separated by at least one empty cell (Project Euler 114 style).

    Dynamic programming over row lengths; index r of the table holds the
    count for a row of length r (1 way — all empty — as the base case).
    """
    counts = [1] * (SCREAMING_SNAKE_CASE_ + 1)
    for row_length in range(3, SCREAMING_SNAKE_CASE_ + 1):
        for block_length in range(3, row_length + 1):
            # A block of this length at each feasible start, followed by a
            # gap, leaves an independent sub-row to fill.
            counts[row_length] += sum(
                counts[row_length - start - block_length - 1]
                for start in range(row_length - block_length)
            )
            # Plus the placement flush against the right edge.
            counts[row_length] += 1
    return counts[SCREAMING_SNAKE_CASE_]


if __name__ == "__main__":
    print(f"{solution() = }")
| 315
| 0
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def a ( SCREAMING_SNAKE_CASE_ : int=None ):
    """Build the `accelerate env` argument parser.

    When `subparsers` is given, registers an `env` subcommand on it;
    otherwise builds a standalone parser. Returns the parser.
    """
    if SCREAMING_SNAKE_CASE_ is not None:
        parser = SCREAMING_SNAKE_CASE_.add_parser('env')
    else:
        parser = argparse.ArgumentParser('Accelerate env command')
    parser.add_argument(
        '--config_file',
        # FIX: the original passed the subparsers object as the default.
        default=None,
        help='The config file to use for the default values in the launching script.',
    )
    if SCREAMING_SNAKE_CASE_ is not None:
        # FIX: dispatch to the env handler, not the subparsers object.
        # NOTE(review): `env_command` is the intended handler defined below
        # in this module — confirm the name.
        parser.set_defaults(func=env_command)
    return parser
def a ( SCREAMING_SNAKE_CASE_ : Tuple ):
    """Print environment info for GitHub bug reports; returns the info dict.

    Expects the parsed `argparse` namespace with a `config_file` attribute.
    """
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = 'Not found'
    # Get the default from the config file.
    # FIX: the original called os.path.isfile on the args namespace; the
    # fallback is the default config file path imported above.
    if SCREAMING_SNAKE_CASE_.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(SCREAMING_SNAKE_CASE_.config_file).to_dict()
    info = {
        '`Accelerate` version': version,
        'Platform': platform.platform(),
        'Python version': platform.python_version(),
        'Numpy version': np.__version__,
        'PyTorch version (GPU?)': f"""{pt_version} ({pt_cuda_available})""",
        'PyTorch XPU available': str(pt_xpu_available),
        'PyTorch NPU available': str(pt_npu_available),
        'System RAM': f"""{psutil.virtual_memory().total / 1_0_2_4 ** 3:.2f} GB""",
    }
    if pt_cuda_available:
        # FIX: the GPU name was computed but dropped into a throwaway local.
        # NOTE(review): key name restored per upstream convention — confirm.
        info["GPU type"] = torch.cuda.get_device_name()
    print('\nCopy-and-paste the text below in your GitHub issue\n')
    print('\n'.join([f"""- {prop}: {val}""" for prop, val in info.items()]))
    print('- `Accelerate` default config:' if SCREAMING_SNAKE_CASE_.config_file is None else '- `Accelerate` config passed:')
    accelerate_config_str = (
        '\n'.join([f"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()])
        # FIX: the original tested isinstance(args, args), a TypeError at runtime.
        if isinstance(accelerate_config, dict)
        else f"""\t{accelerate_config}"""
    )
    print(accelerate_config_str)
    # FIX: the config was dropped into a throwaway local instead of the dict.
    # NOTE(review): key name per upstream convention — confirm.
    info["`Accelerate` configs"] = accelerate_config
    return info
def a ( ):
    """CLI entry point: parse arguments, run the env report, exit code 0.

    NOTE(review): calls `env_command_parser`/`env_command`, the intended
    names of the helpers defined above in this module.
    """
    cli_args = env_command_parser().parse_args()
    env_command(cli_args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
| 350
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class UpperCAmelCase_ ( _a):
    """
    Trainer specialized for extractive question answering: after the standard
    prediction loop it post-processes raw start/end logits into text answers
    (via `post_process_function`) before computing metrics.

    NOTE(review): the obfuscated original declared duplicate parameter names
    in every signature (a SyntaxError), dropped the `self.*` assignments, and
    named both overrides `_lowercase` so the second shadowed the first; the
    conventional `evaluate`/`predict` overrides are restored.
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        # Raw examples matching eval_dataset, needed by the post-processor.
        self.eval_examples = eval_examples
        # Callable (examples, features, predictions[, stage]) -> metric inputs.
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix="eval"):
        """Run evaluation, post-process predictions, log and return metrics."""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description='Evaluation',
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            # Always restore the user's compute_metrics, even if the loop raised.
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"""{metric_key_prefix}_"""):
                    metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix="test"):
        """Run prediction, post-process, and return a PredictionOutput with metrics."""
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description='Prediction',
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, 'predict')
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"""{metric_key_prefix}_"""):
                metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 315
| 0
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
# Demo fixtures for the sorted-linked-list merge below.
# FIX: both tuples were bound to the same throwaway name, while the
# __main__ demo references `test_data_odd` / `test_data_even`.
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    """One singly linked node: an integer payload and the next link.

    FIX: the original declared both fields under the same throwaway name
    (so the dataclass had a single field), while the list iterator reads
    `data` and `next_node`; field names restored. The class name is
    restored to `Node`, which the annotations and constructor calls use.
    """

    data: int
    next_node: Node | None


class SortedLinkedList:
    """Singly linked list whose elements are kept in ascending order.

    FIX: the original `__init__` dropped every node into a throwaway local
    (leaving `head` unset) and passed the whole input iterable to `Node`;
    `__str__` stringified an undefined name. The class name is restored to
    `SortedLinkedList`, which the module's annotations and demo use.
    """

    def __init__(self, ints) -> None:
        self.head: Node | None = None
        # Insert largest-first so the finished list reads smallest -> largest.
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self):
        """Yield the stored integers in ascending order."""
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])
def a ( sll_one : SortedLinkedList , sll_two : SortedLinkedList ):
    """Merge two sorted linked lists into a new SortedLinkedList.

    FIX: the original declared both parameters with the same name, which is
    a SyntaxError in Python.
    """
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # FIX: the demo call below uses `SSL(...)`, but the alias was assigned to
    # a throwaway name; bind it explicitly.
    # NOTE(review): `merge_lists` is the intended name of the merge helper
    # defined above — confirm the module's public names.
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 351
|
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def a ( SCREAMING_SNAKE_CASE_ : str ):
    """Return the canonical anagram signature: the word's letters, sorted."""
    letters = sorted(SCREAMING_SNAKE_CASE_)
    return "".join(letters)
def a ( SCREAMING_SNAKE_CASE_ : str ):
    """Look up every word sharing this word's letter signature.

    NOTE(review): depends on module-level `word_by_signature` and `signature`
    being defined — confirm the module's public names.
    """
    key = signature(SCREAMING_SNAKE_CASE_)
    return word_by_signature[key]
# Load the word list shipped next to this module.
# NOTE(review): each assignment below binds the same throwaway name while the
# following lines reference `data`, `word_list`, `word_by_signature`,
# `anagram`, and `all_anagrams` — the intended names look lost in a rename;
# confirm before running.
__UpperCAmelCase : str = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
# Normalize to unique lower-case words.
__UpperCAmelCase : Tuple = sorted({word.strip().lower() for word in data.splitlines()})
# Group words by their sorted-letter signature.
__UpperCAmelCase : Union[str, Any] = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    # Keep only groups with more than one member (true anagram sets) and dump
    # them as a Python literal for inspection.
    __UpperCAmelCase : int = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
| 315
| 0
|
# Size of the character set assumed by the rolling hash.
# FIX: these constants were bound to throwaway names while the matcher body
# references `alphabet_size` and `modulus`.
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def a ( pattern : str , text : str ):
    """Rabin-Karp substring search: True iff `pattern` occurs in `text`.

    Compares rolling hashes of each text window against the pattern hash and
    confirms candidate matches with a direct string comparison.

    FIX: the original declared both parameters with the same name, which is
    a SyntaxError in Python.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def a ( ):
    """Self-test harness for the Rabin-Karp matcher defined above.

    NOTE(review): this harness calls `rabin_karp(...)` and the __main__ guard
    calls `test_rabin_karp()`, but the definitions in this file are named `a`
    and every local below is bound to a throwaway name while the asserts
    reference `pattern`/`text` via a single repeated identifier — the names
    look lost in a rename; confirm the intended public names before running.
    """
    # Test 1) pattern present in one text, absent in another.
    UpperCamelCase : Tuple = '''abc1abc12'''
    UpperCamelCase : Any = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
    UpperCamelCase : List[Any] = '''alskfjaldsk23adsfabcabc'''
    assert rabin_karp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and not rabin_karp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    # Test 2)
    UpperCamelCase : int = '''ABABX'''
    UpperCamelCase : Tuple = '''ABABZABABYABABX'''
    assert rabin_karp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    # Test 3)
    UpperCamelCase : Dict = '''AAAB'''
    UpperCamelCase : Union[str, Any] = '''ABAAAAAB'''
    assert rabin_karp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    # Test 4)
    UpperCamelCase : Any = '''abcdabcy'''
    UpperCamelCase : Tuple = '''abcxabcdabxabcdabcdabcy'''
    assert rabin_karp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    # Test 5) non-ASCII characters in pattern and text.
    UpperCamelCase : str = '''Lü'''
    UpperCamelCase : Union[str, Any] = '''Lüsai'''
    assert rabin_karp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    UpperCamelCase : Optional[int] = '''Lue'''
    assert not rabin_karp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    print('''Success.''' )
if __name__ == "__main__":
    test_rabin_karp()
| 352
|
def a ( source_data : list[list[float]] ):
    """Transpose row-wise records into per-attribute column lists of floats.

    FIX: the original parameter was renamed to a junk identifier while the
    body iterates `source_data`, and the enumerate/len targets collapsed to
    the same junk name (NameError at runtime); restored so row r, column i
    lands in data_lists[i][r].
    """
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            # Grow the column list lazily the first time column i is seen.
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists
def a ( data_lists : list[list[float]] , weights : list[int] ):
    """Min-max normalize each attribute list according to its weight.

    Weight 0 inverts the score (lower raw value is better); weight 1 keeps
    it (higher is better). Constant columns score 1 (weight 0) or 0
    (weight 1). Raises ValueError for any other weight.

    FIX: the original declared both parameters with the same name, which is
    a SyntaxError in Python.
    """
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"""Invalid weight of {weight:f} provided"""
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists
def a ( score_lists : list[list[float]] ):
    """Sum per-attribute score lists column-wise into one score per record.

    FIX: the original enumerated the outer list-of-lists (adding a list to a
    float accumulator, a TypeError) instead of each inner score list, and
    its parameter had been renamed away from `score_lists` which the body
    references.
    """
    final_scores: list[float] = [0 for _ in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def a ( source_data : list[list[float]] , weights : list[int] ):
    """Score each record by weighted min-max proximity and append the score
    in place to every record; returns the (mutated) source data.

    FIX: the original declared both parameters with the same name, which is
    a SyntaxError in Python.

    NOTE(review): relies on `get_data`, `calculate_each_score`, and
    `generate_final_scores` being defined in this module — confirm names.
    """
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
| 315
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
# Module-level logger for this configuration file.
# NOTE(review): both assignments below bind the same name, so the logger is
# immediately shadowed by the archive map; the intended names were presumably
# `logger` and `LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP` — confirm before
# relying on either binding.
__UpperCAmelCase : Dict = logging.get_logger(__name__)
# Map of released Longformer checkpoints to their hosted config.json URLs.
__UpperCAmelCase : int = {
    "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
    "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": (
        "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
    ),
    "allenai/longformer-base-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
    "allenai/longformer-large-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
}
class UpperCAmelCase_ ( _a):
    """Longformer-style model configuration.

    FIX: the obfuscated original declared every __init__ parameter under one
    shared name (a SyntaxError) and assigned each value to a throwaway local
    instead of an instance attribute. Parameter names are restored from the
    attribute assignment order; defaults are kept exactly as written.
    """

    # NOTE(review): presumably the `model_type` class attribute — confirm.
    __UpperCamelCase : Union[str, Any] = "longformer"

    def __init__(
        self,
        attention_window=512,
        sep_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        onnx_export=False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class UpperCAmelCase_ ( _a):
    """ONNX export configuration for the Longformer-style model above.

    FIX: the obfuscated original declared duplicate __init__ parameter names
    (a SyntaxError), named all five members `_lowercase` (so each shadowed
    the previous), and dropped several dict writes into throwaway locals.
    Member names are restored to the OnnxConfig contract the bodies imply
    (`inputs`/`outputs` properties, `generate_dummy_inputs`).
    """

    def __init__(self, config, task="default", patching_specs=None):
        super().__init__(config, task, patching_specs)
        # Tell the model to take its ONNX-friendly code paths during export.
        # NOTE(review): restored from the bare `= True` in the original — confirm.
        config.onnx_export = True

    @property
    def inputs(self):
        """Input names with their dynamic axes, keyed by task."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('global_attention_mask', dynamic_axis),
            ] )

    @property
    def outputs(self):
        """Base outputs, plus the pooled output for the default task."""
        outputs = super().outputs
        if self.task == "default":
            # NOTE(review): the original built {0: "batch"} and discarded it;
            # restored as the pooled-output axis mapping — confirm key name.
            outputs["pooler_output"] = {0: 'batch'}
        return outputs

    @property
    def atol_for_validation(self):
        """Absolute tolerance when validating exported model outputs."""
        return 1e-4

    @property
    def default_onnx_opset(self):
        """Opset floor of 14 (needed by ops this model exports)."""
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(self, preprocessor, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        """Dummy inputs for tracing, with a global-attention mask added."""
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs['input_ids'])
        # make every second token global
        # NOTE(review): the original assigned a bare `= 1`; restored as the
        # strided write the comment above describes — confirm.
        inputs["global_attention_mask"][:, ::2] = 1
        return inputs
| 353
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
# Paths for the dataset augmentation script below.
# NOTE(review): all three path constants bind the same throwaway name (each
# shadowing the last) while main() passes two directories to get_dataset and
# writes to an OUTPUT_DIR-style prefix — the intended names (label dir,
# image dir, output dir) look lost in a rename; confirm before running.
__UpperCAmelCase : Optional[int] = ""
__UpperCAmelCase : Union[str, Any] = ""
__UpperCAmelCase : Optional[int] = ""
# Flip axis passed to cv2.flip: 0 is vertical, 1 is horizontal.
__UpperCAmelCase : Any = 1  # (0 is vertical, 1 is horizontal)
def a ( ):
    """Flip every dataset image, then write the flipped images and their
    updated YOLO-style annotation files with a random filename suffix.

    NOTE(review): this body references `get_dataset`, `update_image_and_anno`,
    `paths`, `OUTPUT_DIR`, `new_images`, `new_annos`, and `random_chars`, and
    drops several results into throwaway names — the identifiers look lost in
    a rename; confirm the module's public names before running.
    """
    UpperCamelCase , UpperCamelCase : List[Any] = get_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    print('''Processing...''' )
    UpperCamelCase , UpperCamelCase , UpperCamelCase : Any = update_image_and_anno(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    for index, image in enumerate(SCREAMING_SNAKE_CASE_ ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        UpperCamelCase : Optional[int] = random_chars(3_2 )
        # Original file stem without its extension.
        UpperCamelCase : List[Any] = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        UpperCamelCase : int = F"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
        # Write the flipped image as a quality-85 JPEG.
        cva.imwrite(F"""/{file_root}.jpg""" , SCREAMING_SNAKE_CASE_ , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
        print(F"""Success {index+1}/{len(SCREAMING_SNAKE_CASE_ )} with {file_name}""" )
        UpperCamelCase : Any = []
        for anno in new_annos[index]:
            # One "class x_center y_center width height" line per box.
            UpperCamelCase : Tuple = F"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
            annos_list.append(SCREAMING_SNAKE_CASE_ )
        with open(F"""/{file_root}.txt""" , '''w''' ) as outfile:
            outfile.write('''\n'''.join(line for line in annos_list ) )
def a ( label_dir : str , img_dir : str ):
    """Collect image paths and their YOLO-style boxes from label files.

    Reads every `*.txt` in `label_dir`; each line is
    "class x_center y_center width height". The matching image is assumed to
    be `<same stem>.jpg` under `img_dir`. Label files with no boxes are
    skipped. Returns (img_paths, labels).

    FIX: the original declared both parameters with the same name, which is
    a SyntaxError in Python.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, '*.txt')):
        label_name = label_file.split(os.sep)[-1].rsplit('.', 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"""{label_name}.jpg""")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('\n').split(' ')
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ] )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def a ( img_list : list , anno_list : list , flip_type : int = 1 ):
    """Flip each image and mirror its box coordinates accordingly.

    flip_type 1 flips horizontally (mirrors x_center), 0 flips vertically
    (mirrors y_center). Returns (new images, new annotation lists, source
    paths), index-aligned with the inputs.

    FIX: the original declared all parameters with the same name (a
    SyntaxError) and dropped the per-image results into throwaway locals;
    accumulator/loop names restored from the append call sites.
    """
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cva.imread(path)
        if flip_type == 1:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def a(number_char: int = 3_2):
    """Return a random string of lowercase ASCII letters and digits.

    Args:
        number_char: length of the generated string; must be greater than 1.

    Raises:
        ValueError: if ``number_char`` is not greater than 1.
    """
    # Raise instead of assert so the check survives `python -O`.
    if number_char <= 1:
        raise ValueError("The number of character should greater than 1")
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
    # Script entry point: run the augmentation pipeline defined above.
    main()
    print("DONE ✅")
| 315
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : Tuple = logging.get_logger(__name__)
__UpperCAmelCase : Tuple = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class UpperCAmelCase_ ( _a):
    """Configuration for a MarkupLM model.

    Holds the standard BERT-style encoder hyper-parameters plus the
    xpath-embedding sizes that are specific to MarkupLM.
    """

    __UpperCamelCase : Union[str, Any] = "markuplm"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1_024,
        tag_pad_id=216,
        subs_pad_id=1_001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        """Build the config; token ids are forwarded to the base class."""
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties (MarkupLM xpath embeddings)
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 354
|
import qiskit
def a(bita: int, bitb: int):
    """Simulate a quantum half adder on two classical input bits.

    Qubits 0/1 encode the inputs, qubit 2 accumulates their XOR (sum bit)
    via two CNOTs, and qubit 3 their AND (carry bit) via a Toffoli gate.

    Returns:
        The measurement histogram (counts dict) from 1000 shots on the
        aer simulator; classical bit 0 is the sum, bit 1 the carry.
    """
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bita == 1:
        qc_ha.x(0)
    if bitb == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1_000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = a(1, 1)
    print(f"""Half Adder Output Qubit Counts: {counts}""")
| 315
| 0
|
# Algorithm for the pigeonhole sorting
def a(array):
    """Sort a list of integers in place using pigeonhole sort.

    Builds one "hole" (counter) per value in ``[min, max]`` and rewrites the
    input list in ascending order.  O(n + range) time, O(range) extra space.

    Raises:
        AssertionError: if any element is not an int.
        ValueError: if ``array`` is empty (from ``min``/``max``).
    """
    min_val = min(array)  # min() finds the minimum value
    max_val = max(array)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in array:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            array[i] = count + min_val
            i += 1
def a():
    """Demo entry point: sort a sample list and print the result."""
    nums = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(nums)
    # join() needs strings, so convert each int before printing.
    print("""Sorted order is:""", """ """.join(str(n) for n in nums))


if __name__ == "__main__":
    a()
| 355
|
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__UpperCAmelCase : str = logging.get_logger(__name__)
def a(image_embeds, text_embeds):
    """Pairwise cosine similarity between two batches of embeddings.

    Both inputs are L2-normalized row-wise, so the matrix product of the
    normalized batches is exactly the cosine similarity of every pair.

    Returns:
        Tensor of shape ``(image_embeds.shape[0], text_embeds.shape[0])``.
    """
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
class UpperCAmelCase_ ( _a):
    """CLIP-based safety checker: scores image embeddings against fixed
    concept embeddings and flags images whose scores exceed the thresholds.

    The embeddings/thresholds are frozen parameters loaded from the
    pretrained checkpoint (hence ``requires_grad=False``).
    """

    __UpperCamelCase : List[str] = CLIPConfig
    __UpperCamelCase : Optional[int] = ["CLIPEncoderLayer"]

    def __init__(self, config):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        # Frozen concept/special-care embeddings and their score thresholds.
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def _lowercase(self, clip_input, images):
        """Per-image (python loop) check; returns ``(images, has_nsfw_concepts)``."""
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
        return images, has_nsfw_concepts

    @torch.no_grad()
    def _lowercase(self, clip_input, images):  # noqa: F811 -- the source also redefines this name
        """Vectorized (batched tensor) variant of the check above."""
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
| 315
| 0
|
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def a(config_name: str, save_dir: str, **config_kwargs):
    """Create and save a randomly initialized seq2seq model plus its tokenizer.

    Args:
        config_name: model identifier or path whose config/tokenizer to load.
        save_dir: directory to write the untrained model and tokenizer to.
        **config_kwargs: overrides forwarded to ``AutoConfig.from_pretrained``.

    Returns:
        The freshly initialized (untrained) model.
    """
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeqaSeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    # Expose the function as a CLI: python <script> <config_name> <save_dir> ...
    fire.Fire(a)
| 356
|
from argparse import ArgumentParser
from .env import EnvironmentCommand
def a():
    """Entry point for the ``diffusers-cli`` command-line tool.

    Builds the argument parser, registers all subcommands, then dispatches
    to the service selected on the command line.
    """
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        # No subcommand given: show usage and exit with an error status.
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    a()
| 315
| 0
|
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def a(SCREAMING_SNAKE_CASE_: str):
    """Return the canonical signature of a word: its letters in sorted order."""
    letters = sorted(SCREAMING_SNAKE_CASE_)
    return "".join(letters)
def a(SCREAMING_SNAKE_CASE_: str):
    """Look up every known word that shares this word's letter signature."""
    key = signature(SCREAMING_SNAKE_CASE_)
    return word_by_signature[key]
# Load the bundled word list (one word per line) shipped next to this script.
__UpperCAmelCase : str = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
# NOTE(review): the assignment targets below are all `__UpperCAmelCase`, but
# later lines read `data`, `word_list`, `word_by_signature`, `anagram` and
# `all_anagrams` -- the names do not line up; verify against the original script.
__UpperCAmelCase : Tuple = sorted({word.strip().lower() for word in data.splitlines()})

__UpperCAmelCase : Union[str, Any] = collections.defaultdict(list)
# Group every word under its sorted-letter signature (anagram buckets).
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    # Keep only words with at least one other anagram, then dump the mapping.
    __UpperCAmelCase : int = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
| 357
|
def a(word: str) -> str:
    """Upper-case the ASCII lowercase letters of ``word``.

    The two ASCII cases differ by 32 code points, so each ``a``-``z``
    character is shifted down by 32; everything else passes through.

    >>> a("hello world")
    'HELLO WORLD'
    >>> a("Mix3d CaSe!")
    'MIX3D CASE!'
    >>> a("")
    ''
    """
    # Fix: shift each character, not the whole word (ord() takes a single char).
    return "".join(chr(ord(char) - 3_2) if "a" <= char <= "z" else char for char in word)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 315
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase : Any = logging.get_logger(__name__)
__UpperCAmelCase : List[str] = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class UpperCAmelCase_ ( _a):
    """Configuration for a LeViT model (conv patch embedding + attention stages)."""

    __UpperCamelCase : Union[str, Any] = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],  # noqa: B006 -- mutable defaults kept for API compatibility
        num_attention_heads=[4, 8, 12],  # noqa: B006
        depths=[4, 4, 4],  # noqa: B006
        key_dim=[16, 16, 16],  # noqa: B006
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],  # noqa: B006
        attention_ratio=[2, 2, 2],  # noqa: B006
        initializer_range=0.02,
        **kwargs,
    ):
        """Store per-stage hyper-parameters (one list entry per stage)."""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        # Downsampling ("Subsample") ops inserted between stages 1->2 and 2->3.
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class UpperCAmelCase_ ( _a):
    """ONNX export configuration for LeViT."""

    __UpperCamelCase : Tuple = version.parse("1.11")

    @property
    def _lowercase(self):
        """Input spec: one NCHW pixel tensor with symbolic axis names."""
        pixel_axes = {0: "batch", 1: "num_channels", 2: "height", 3: "width"}
        return OrderedDict([("pixel_values", pixel_axes)])

    @property
    def _lowercase(self):  # noqa: F811 -- mirrors the original, which also redefines the name
        """Absolute tolerance used when validating the exported model."""
        return 1e-4
| 358
|
import math
def a(number: int):
    """Return True iff ``number`` is prime, via odd trial division up to sqrt.

    Raises:
        AssertionError: if ``number`` is not a non-negative int.
    """
    # Fix: compare against the *type* int; the original passed the value itself
    # as the second isinstance argument, raising TypeError on every call.
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)
def a(value: int, factor: int = 1, **kwargs):
    """Return the nearest prime reached by stepping from ``factor * value``.

    Steps upward by default, or downward when ``desc=True`` is passed in
    ``kwargs``.  If the search lands exactly on the starting value, restart
    ascending from ``value + 1`` so a strictly *next* prime is returned.
    """
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        # Step up by default; step down when called with desc=True.
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
| 315
| 0
|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _UpperCAmelCase():
    """Parse TensorFlow benchmark CLI arguments and run the benchmark.

    The arguments are re-parsed inside a try/except purely to translate the
    deprecated ``--no_<flag>`` spelling into a helpful error that points at
    the new ``--no-<flag>`` form.
    """
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        # NOTE(review): eval() on an error-message fragment is fragile/unsafe in
        # general; kept because the message is produced by HfArgumentParser itself.
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    _UpperCAmelCase()
| 359
|
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a):
    """Deprecated drop-in replacement for ``ImageGPTImageProcessor``.

    Instantiating it still works but emits a ``FutureWarning``; callers
    should migrate to ``ImageGPTImageProcessor``.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 315
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase : Tuple = logging.get_logger(__name__)
__UpperCAmelCase : Union[str, Any] = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( _a):
    """Configuration for an I-BERT model.

    Standard BERT-style encoder hyper-parameters plus the integer-quantization
    switches (``quant_mode``/``force_dequant``) specific to I-BERT.
    """

    __UpperCamelCase : List[Any] = "ibert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        """Build the config; token ids are forwarded to the base class."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # I-BERT specific: run in quantized mode / selectively disable quantization.
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class UpperCAmelCase_ ( _a):
    """ONNX export configuration for I-BERT."""

    @property
    def _lowercase(self):
        """Input spec for ONNX export; multiple-choice tasks add a `choice` axis."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        # Fix: the axis mapping was never bound to the name the return reads.
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 360
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase_ ( unittest.TestCase):
    """Holds the hyper-parameters shared by the image-processor tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        # Default target size matches the 18x18 test configuration.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def _lowercase(self):
        """Return the kwargs used to build the image processor under test."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCAmelCase_ ( _a, unittest.TestCase):
    """Unit tests for ``LayoutLMvaImageProcessor`` (resizing + OCR extraction)."""

    # The processor class under test; None when pytesseract is unavailable.
    __UpperCamelCase : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def _lowercase(self):
        """setUp: build the shared tester holding the test hyper-parameters."""
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def _lowercase(self):  # noqa: F811 -- the source reuses this name for every member
        """Kwargs used to instantiate the image processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def _lowercase(self):  # noqa: F811
        """The processor exposes its configuration attributes."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def _lowercase(self):  # noqa: F811
        """``from_dict`` honors both the stored and an overridden ``size``."""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def _lowercase(self):  # noqa: F811
        """Placeholder kept for parity with the common image-processing tests."""
        pass

    def _lowercase(self):  # noqa: F811
        """PIL inputs: check output shapes for single and batched images."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def _lowercase(self):  # noqa: F811
        """NumPy inputs behave exactly like PIL inputs."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def _lowercase(self):  # noqa: F811
        """PyTorch tensor inputs behave exactly like PIL inputs."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def _lowercase(self):  # noqa: F811
        """Integration: OCR words/boxes extracted from a real document image."""
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
        expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 315
| 0
|
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase_ ( _a, unittest.TestCase):
    """Fast CPU tests for the DDIM unconditional image-generation pipeline.

    Uses a tiny randomly-initialised UNet so each test finishes in seconds.

    NOTE(review): local names in this copy look mangled — values are assigned
    to a throwaway ``UpperCamelCase`` but later reads use ``unet``,
    ``scheduler``, ``pipe`` etc.; verify against the upstream diffusers test.
    """

    __UpperCamelCase : Optional[int] = DDIMPipeline
    __UpperCamelCase : Any = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    # Optional pipeline params DDIM does not support, removed from the
    # mixin's required set.
    __UpperCamelCase : int = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    __UpperCamelCase : str = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    __UpperCamelCase : List[str] = False

    def _lowercase ( self ):
        """Build dummy pipeline components: a tiny UNet plus a DDIM scheduler."""
        # Seed before constructing the model so the random init is reproducible.
        torch.manual_seed(0 )
        UpperCamelCase : Dict = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
        UpperCamelCase : List[str] = DDIMScheduler()
        UpperCamelCase : str = {'''unet''': unet, '''scheduler''': scheduler}
        return components

    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ):
        """Return deterministic call kwargs (seeded generator, 2 steps, numpy out)."""
        # MPS does not support device-bound generators, so seed globally there.
        if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
            UpperCamelCase : List[str] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
        else:
            UpperCamelCase : List[str] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Any = {
            '''batch_size''': 1,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    def _lowercase ( self ):
        """Smoke test: run 2 inference steps on CPU and compare a 3x3 pixel slice."""
        UpperCamelCase : Dict = '''cpu'''
        UpperCamelCase : Any = self.get_dummy_components()
        UpperCamelCase : Tuple = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
        pipe.to(__SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Any = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Optional[Any] = pipe(**__SCREAMING_SNAKE_CASE ).images
        # Bottom-right 3x3 corner of the last channel.
        UpperCamelCase : int = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 32, 32, 3) )
        UpperCamelCase : Tuple = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
        UpperCamelCase : Any = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1e-3 )

    def _lowercase ( self ):
        """Dict and tuple pipeline outputs must agree within tolerance."""
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )

    def _lowercase ( self ):
        """Saving/reloading locally must not change outputs beyond tolerance."""
        super().test_save_load_local(expected_max_difference=3e-3 )

    def _lowercase ( self ):
        """Optional components must round-trip through save/load."""
        super().test_save_load_optional_components(expected_max_difference=3e-3 )

    def _lowercase ( self ):
        """Batched vs single inference must produce identical images."""
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
    """Slow GPU integration tests against real pretrained DDPM checkpoints.

    NOTE(review): locals look mangled here too (assignments to
    ``UpperCamelCase`` with later reads of ``ddim``/``ddpm``/``image``);
    verify against the upstream diffusers test before running.
    """

    def _lowercase ( self ):
        """google/ddpm-cifar10-32 with a fresh DDIM scheduler: check pixel slice."""
        UpperCamelCase : Dict = '''google/ddpm-cifar10-32'''
        UpperCamelCase : str = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE )
        UpperCamelCase : List[str] = DDIMScheduler()
        UpperCamelCase : List[Any] = DDIMPipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
        ddim.to(__SCREAMING_SNAKE_CASE )
        ddim.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
        UpperCamelCase : str = torch.manual_seed(0 )
        # eta=0.0 makes DDIM fully deterministic given the seeded generator.
        UpperCamelCase : List[Any] = ddim(generator=__SCREAMING_SNAKE_CASE , eta=0.0 , output_type='''numpy''' ).images
        UpperCamelCase : str = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        UpperCamelCase : str = np.array([0.1_723, 0.1_617, 0.1_600, 0.1_626, 0.1_497, 0.1_513, 0.1_505, 0.1_442, 0.1_453] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def _lowercase ( self ):
        """google/ddpm-ema-bedroom-256 with its pretrained scheduler config."""
        UpperCamelCase : Union[str, Any] = '''google/ddpm-ema-bedroom-256'''
        UpperCamelCase : Optional[Any] = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Union[str, Any] = DDIMScheduler.from_pretrained(__SCREAMING_SNAKE_CASE )
        UpperCamelCase : int = DDIMPipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
        ddpm.to(__SCREAMING_SNAKE_CASE )
        ddpm.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Union[str, Any] = torch.manual_seed(0 )
        UpperCamelCase : List[str] = ddpm(generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images
        UpperCamelCase : Tuple = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        UpperCamelCase : Optional[int] = np.array([0.0_060, 0.0_201, 0.0_344, 0.0_024, 0.0_018, 0.0_002, 0.0_022, 0.0_000, 0.0_069] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 361
|
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def a ( SCREAMING_SNAKE_CASE_ : dict ):
    """Split a scikit-learn-style dataset dict into a (features, targets) pair."""
    features = SCREAMING_SNAKE_CASE_["data"]
    targets = SCREAMING_SNAKE_CASE_["target"]
    return (features, targets)
def a ( train_features : np.ndarray , train_targets : np.ndarray , test_features : np.ndarray ) -> np.ndarray:
    """Fit an XGBoost regressor on the training split and predict the test split.

    Fix: the original signature repeated the same parameter name three times,
    which is a SyntaxError in Python; the parameters are positional (train
    features, train targets, test features), matching the caller in ``main``.

    Returns:
        Predictions reshaped to a column vector of shape ``(n_test, 1)``.
    """
    model = XGBRegressor(verbosity=0 , random_state=4_2 )
    model.fit(train_features , train_targets )
    # Predict target for test data
    predictions = model.predict(test_features )
    # One prediction per test row, reshaped to a column vector.
    return predictions.reshape(len(predictions ) , 1 )
def a ( ):
    """Train and evaluate an XGBoost regressor on California housing data.

    NOTE(review): this obfuscated copy calls ``data_handling``/``xgboost`` and
    passes the placeholder ``SCREAMING_SNAKE_CASE_`` around — several of these
    names are not defined in this module; verify against the upstream script
    before running.
    """
    UpperCamelCase : Tuple = fetch_california_housing()
    UpperCamelCase , UpperCamelCase : Tuple = data_handling(SCREAMING_SNAKE_CASE_ )
    # 75/25 train/test split with a fixed seed for reproducibility.
    UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[str] = train_test_split(
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , test_size=0.25 , random_state=1 )
    UpperCamelCase : Optional[Any] = xgboost(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    # Error printing
    print(F"""Mean Absolute Error : {mean_absolute_error(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}""" )
    print(F"""Mean Square Error : {mean_squared_error(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}""" )
if __name__ == "__main__":
    # Run the module doctests, then the full training/evaluation script.
    import doctest

    doctest.testmod(verbose=True)
    main()  # NOTE(review): `main` is named `a` in this obfuscated module.
| 315
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__UpperCAmelCase : Any = None
__UpperCAmelCase : Dict = logging.get_logger(__name__)
# File names used when saving/loading the tokenizer to disk.
__UpperCAmelCase : List[str] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
# Hub URLs for the pretrained BARThez vocab and tokenizer files.
__UpperCAmelCase : Optional[int] = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
        ),
    },
}
# Maximum model input length (in tokens) for each pretrained checkpoint.
__UpperCAmelCase : Optional[int] = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}
# SentencePiece word-boundary marker character.
__UpperCAmelCase : Optional[int] = "▁"
class UpperCAmelCase_ ( _a):
    """Fast (Rust-backed) tokenizer for BARThez, based on a BPE SentencePiece model.

    NOTE(review): several assignments below target a throwaway local named
    ``UpperCamelCase`` where the upstream ``BarthezTokenizerFast`` assigns
    instance attributes (e.g. ``self.vocab_file``,
    ``self.can_save_slow_tokenizer``); verify against upstream before use.
    """

    __UpperCamelCase : str = VOCAB_FILES_NAMES
    __UpperCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
    __UpperCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Encoder input names produced by this tokenizer.
    __UpperCamelCase : str = ["input_ids", "attention_mask"]
    # Slow (Python/SentencePiece) tokenizer class used as fallback.
    __UpperCamelCase : int = BarthezTokenizer

    def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , **__SCREAMING_SNAKE_CASE , ):
        """Build the fast tokenizer; a plain-string mask token becomes an AddedToken."""
        # Wrap a bare string mask token so the leading space is stripped on match.
        UpperCamelCase : Union[str, Any] = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
        super().__init__(
            __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
        UpperCamelCase : Optional[Any] = vocab_file
        # Saving a slow tokenizer requires the original SentencePiece file.
        UpperCamelCase : Tuple = False if not self.vocab_file else True

    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
        """Add special tokens: ``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        UpperCamelCase : int = [self.cls_token_id]
        UpperCamelCase : List[str] = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
        """Return an all-zeros token-type-id mask (BARThez does not use segments)."""
        UpperCamelCase : Tuple = [self.sep_token_id]
        UpperCamelCase : List[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
        """Copy the SentencePiece model file into ``save_directory``; returns its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        UpperCamelCase : str = os.path.join(
            __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        # Only copy when source and destination differ.
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ):
            copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
        return (out_vocab_file,)
| 362
|
# Notebook cell injected at the top of auto-generated documentation notebooks.
__UpperCAmelCase : str = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
# NOTE(review): upstream this references the constant above as INSTALL_CONTENT;
# that name is undefined in this obfuscated copy.
__UpperCAmelCase : Dict = [{"type": "code", "content": INSTALL_CONTENT}]
# Placeholder -> dummy-class substitutions used when rendering doc templates.
__UpperCAmelCase : Union[str, Any] = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 315
| 0
|
import requests
from bsa import BeautifulSoup
def a ( SCREAMING_SNAKE_CASE_ : str = "https://www.worldometers.info/coronavirus" ):
    """Scrape worldometers and return a dict of COVID-19 statistic name -> value."""
    soup = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE_ ).text , '''html.parser''' )
    # Headings (h1 + panel titles) label the counters; divs hold the numbers.
    keys = soup.findAll('''h1''' )
    values = soup.findAll('''div''' , {'''class''': '''maincounter-number'''} )
    keys += soup.findAll('''span''' , {'''class''': '''panel-title'''} )
    values += soup.findAll('''div''' , {'''class''': '''number-table-main'''} )
    stripped_keys = (k.text.strip() for k in keys)
    stripped_values = (v.text.strip() for v in values)
    return dict(zip(stripped_keys , stripped_values ) )
if __name__ == "__main__":
    # Bold header via ANSI escape codes.
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    # NOTE(review): `world_covidaa_stats` is named `a` in this obfuscated copy.
    for key, value in world_covidaa_stats().items():
        print(f'''{key}\n{value}\n''')
| 363
|
import collections
import os
import re
from pathlib import Path
# Root of the transformers source tree being checked.
__UpperCAmelCase : List[str] = "src/transformers"
# Matches is_xxx_available()
__UpperCAmelCase : int = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
__UpperCAmelCase : Optional[int] = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__UpperCAmelCase : List[Any] = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
__UpperCAmelCase : List[Any] = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
__UpperCAmelCase : str = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__UpperCAmelCase : Union[str, Any] = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
__UpperCAmelCase : Dict = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
__UpperCAmelCase : str = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
__UpperCAmelCase : str = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
__UpperCAmelCase : Any = re.compile(r"^\s*try:")
# Catches a line with else:
__UpperCAmelCase : List[Any] = re.compile(r"^\s*else:")
def a ( SCREAMING_SNAKE_CASE_ : Dict ):
    """Return the normalized backend key for an ``if not is_xxx_available()``
    line, or ``None`` if the line declares no backend.

    Multiple backends on one line are sorted and joined with ``_and_``.
    """
    if _re_test_backend.search(SCREAMING_SNAKE_CASE_ ) is None:
        return None
    # findall yields one tuple per match; group 0 is the backend name.
    found = sorted(match[0] for match in _re_backend.findall(SCREAMING_SNAKE_CASE_ ))
    return "_and_".join(found )
def a ( SCREAMING_SNAKE_CASE_ : Tuple ):
    """Parse an ``__init__.py`` and return two parallel dicts mapping backend
    name (or ``"none"``) to the object names it registers: one read from the
    ``_import_structure`` definition, one from the ``TYPE_CHECKING`` section.

    Returns ``None`` for traditional inits that define no ``_import_structure``.

    NOTE(review): local names in this copy look mangled — values are assigned
    to a throwaway ``UpperCamelCase`` but later reads use ``lines``, ``line``,
    ``objects``, ``backend`` etc.; verify against the upstream
    ``utils/check_inits.py`` before relying on this copy.
    """
    with open(SCREAMING_SNAKE_CASE_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        UpperCamelCase : Tuple = f.readlines()
    UpperCamelCase : Tuple = 0
    # Advance to the start of the `_import_structure` definition.
    while line_index < len(SCREAMING_SNAKE_CASE_ ) and not lines[line_index].startswith('''_import_structure = {''' ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(SCREAMING_SNAKE_CASE_ ):
        return None
    # First grab the objects without a specific backend in _import_structure
    UpperCamelCase : List[Any] = []
    while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
        UpperCamelCase : Optional[int] = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE_ ):
            UpperCamelCase : Optional[int] = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE_ ).groups()[0]
            UpperCamelCase : str = re.findall(R'''\[([^\]]+)\]''' , SCREAMING_SNAKE_CASE_ )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
            line_index += 1
            continue
        UpperCamelCase : List[Any] = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE_ )
        if single_line_import_search is not None:
            UpperCamelCase : List[str] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(SCREAMING_SNAKE_CASE_ ) > 0]
            objects.extend(SCREAMING_SNAKE_CASE_ )
        elif line.startswith(''' ''' * 8 + '''"''' ):
            # Quoted object at 8-space indent; strip quotes and trailing ",\n".
            objects.append(line[9:-3] )
        line_index += 1
    UpperCamelCase : Dict = {'''none''': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        UpperCamelCase : Dict = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            UpperCamelCase : Optional[Any] = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            UpperCamelCase : str = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
                UpperCamelCase : str = lines[line_index]
                if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE_ ) is not None:
                    objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE_ ).groups()[0] )
                elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE_ ) is not None:
                    UpperCamelCase : Union[str, Any] = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE_ ).groups()[0].split(''', ''' )
                    UpperCamelCase : List[Any] = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE_ ) > 0]
                    objects.extend(SCREAMING_SNAKE_CASE_ )
                elif _re_between_brackets.search(SCREAMING_SNAKE_CASE_ ) is not None:
                    UpperCamelCase : str = _re_between_brackets.search(SCREAMING_SNAKE_CASE_ ).groups()[0].split(''', ''' )
                    UpperCamelCase : Dict = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE_ ) > 0]
                    objects.extend(SCREAMING_SNAKE_CASE_ )
                elif _re_quote_object.search(SCREAMING_SNAKE_CASE_ ) is not None:
                    objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE_ ).groups()[0] )
                elif line.startswith(''' ''' * 8 + '''"''' ):
                    objects.append(line[9:-3] )
                elif line.startswith(''' ''' * 1_2 + '''"''' ):
                    objects.append(line[1_3:-3] )
                line_index += 1
            UpperCamelCase : Tuple = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    UpperCamelCase : int = []
    while (
        line_index < len(SCREAMING_SNAKE_CASE_ )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith('''else''' )
    ):
        UpperCamelCase : Tuple = lines[line_index]
        UpperCamelCase : Any = _re_import.search(SCREAMING_SNAKE_CASE_ )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
        elif line.startswith(''' ''' * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    UpperCamelCase : Any = {'''none''': objects}
    # Let's continue with backend-specific objects
    while line_index < len(SCREAMING_SNAKE_CASE_ ):
        # If the line is an if is_backend_available, we grab all objects associated.
        UpperCamelCase : Optional[Any] = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            UpperCamelCase : Dict = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            UpperCamelCase : Dict = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
                UpperCamelCase : Optional[Any] = lines[line_index]
                UpperCamelCase : str = _re_import.search(SCREAMING_SNAKE_CASE_ )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
                elif line.startswith(''' ''' * 1_2 ):
                    objects.append(line[1_2:-2] )
                line_index += 1
            UpperCamelCase : str = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def a ( import_dict_objects , type_hint_objects ):
    """Compare the ``_import_structure`` and ``TYPE_CHECKING`` halves of an init.

    Fix: the original signature repeated the same parameter name twice (a
    SyntaxError); the body already refers to ``import_dict_objects`` and
    ``type_hint_objects``, so those original names are restored.

    Args:
        import_dict_objects: backend -> objects read from ``_import_structure``.
        type_hint_objects: backend -> objects read from ``TYPE_CHECKING``.

    Returns:
        A list of human-readable error strings; empty means both halves agree.
    """

    def find_duplicates(seq):
        # Objects listed more than once on one side.
        return [k for k, v in collections.Counter(seq ).items() if v > 1]

    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(f"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(f"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
        # Compare as sets: ordering differences are not errors.
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = '''base imports''' if key == '''none''' else f"""{key} backend"""
            errors.append(f"""Differences for {name}:""" )
            for obj in type_hint_objects[key]:
                if obj not in import_dict_objects[key]:
                    errors.append(f""" {obj} in TYPE_HINT but not in _import_structure.""" )
            for obj in import_dict_objects[key]:
                if obj not in type_hint_objects[key]:
                    errors.append(f""" {obj} in _import_structure but not in TYPE_HINT.""" )
    return errors
def a ( ):
    """Validate every ``__init__.py``: both halves must declare the same objects.

    Raises ``ValueError`` listing all offending inits.

    NOTE(review): ``os.walk(SCREAMING_SNAKE_CASE_ )`` walks an undefined name
    (upstream: the transformers source path), and the helper functions are all
    named ``a`` in this obfuscated copy; verify against upstream before running.
    """
    UpperCamelCase : Any = []
    for root, _, files in os.walk(SCREAMING_SNAKE_CASE_ ):
        if "__init__.py" in files:
            UpperCamelCase : int = os.path.join(SCREAMING_SNAKE_CASE_ , '''__init__.py''' )
            UpperCamelCase : Optional[int] = parse_init(SCREAMING_SNAKE_CASE_ )
            # parse_init returns None for traditional inits -> nothing to check.
            if objects is not None:
                UpperCamelCase : str = analyze_results(*SCREAMING_SNAKE_CASE_ )
                if len(SCREAMING_SNAKE_CASE_ ) > 0:
                    UpperCamelCase : List[Any] = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
                    failures.append('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
    if len(SCREAMING_SNAKE_CASE_ ) > 0:
        raise ValueError('''\n\n'''.join(SCREAMING_SNAKE_CASE_ ) )
def a ( ):
    """Collect the dotted names of all transformers submodules: first-level
    package folders plus top-level ``.py`` modules, skipping private folders
    and empty leftovers.

    NOTE(review): walks an undefined name and appends mangled locals
    (assignments to ``UpperCamelCase`` then reads of ``short_path``/
    ``submodule``); verify against the upstream ``utils/check_inits.py``.
    """
    UpperCamelCase : Dict = []
    for path, directories, files in os.walk(SCREAMING_SNAKE_CASE_ ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('''_''' ):
                directories.remove(SCREAMING_SNAKE_CASE_ )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(SCREAMING_SNAKE_CASE_ ) / folder).glob('''*.py''' ) ) ) == 0:
                continue
            UpperCamelCase : List[str] = str((Path(SCREAMING_SNAKE_CASE_ ) / folder).relative_to(SCREAMING_SNAKE_CASE_ ) )
            UpperCamelCase : str = short_path.replace(os.path.sep , '''.''' )
            submodules.append(SCREAMING_SNAKE_CASE_ )
        for fname in files:
            if fname == "__init__.py":
                continue
            UpperCamelCase : Tuple = str((Path(SCREAMING_SNAKE_CASE_ ) / fname).relative_to(SCREAMING_SNAKE_CASE_ ) )
            UpperCamelCase : int = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
            # Only keep top-level modules (no dots in the dotted name).
            if len(submodule.split('''.''' ) ) == 1:
                submodules.append(SCREAMING_SNAKE_CASE_ )
    return submodules
# Submodules intentionally exempt from the init-registration check.
__UpperCAmelCase : Optional[int] = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def a ( ):
    """Check every submodule is registered in transformers' main init keys.

    Raises ``ValueError`` listing unregistered submodules.

    NOTE(review): relies on names that are undefined/mangled in this copy
    (``direct_transformers_import`` argument, ``get_transformers_submodules``,
    ``IGNORE_SUBMODULES``); verify against the upstream script.
    """
    from transformers.utils import direct_transformers_import

    UpperCamelCase : Tuple = direct_transformers_import(SCREAMING_SNAKE_CASE_ )
    UpperCamelCase : str = set(transformers._import_structure.keys() )
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentiall re-) add them.
    with open(os.path.join(SCREAMING_SNAKE_CASE_ , '''__init__.py''' ) , '''r''' ) as f:
        UpperCamelCase : List[Any] = f.read()
    import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , SCREAMING_SNAKE_CASE_ ) ) )
    UpperCamelCase : Union[str, Any] = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(SCREAMING_SNAKE_CASE_ ) > 0:
        UpperCamelCase : str = '''\n'''.join(F"""- {module}""" for module in module_not_registered )
        raise ValueError(
            '''The following submodules are not properly registed in the main init of Transformers:\n'''
            F"""{list_of_modules}\n"""
            '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
    # NOTE(review): both checker functions are named `a` in this obfuscated
    # copy, so these names are undefined here; verify against upstream.
    check_all_inits()
    check_submodules()
| 315
| 0
|
from collections.abc import Callable
def a ( func : Callable[[float], float] , left : float , right : float ):
    """Find a root of ``func`` in ``[left, right]`` by bisection.

    Fix: the original signature repeated the same parameter name three times,
    which is a SyntaxError in Python; the parameters are positional
    (function, interval start, interval end).

    Requires ``func`` to change sign on the interval (or vanish at an
    endpoint); otherwise raises ``ValueError``.

    Returns:
        A point where ``func`` is (approximately) zero, to within 1e-7 on x.

    Raises:
        ValueError: if ``func`` has the same sign at both endpoints.
    """
    start: float = left
    end: float = right
    # Endpoints that are already roots short-circuit the search.
    if func(left ) == 0:
        return left
    if func(right ) == 0:
        return right
    if func(left ) * func(right ) > 0:
        # Same sign at both ends: no sign change, bisection cannot proceed.
        raise ValueError('''could not find root in given interval.''' )
    mid: float = start + (end - start) / 2.0
    while abs(start - mid ) > 1_0**-7:  # until precisely equals to 10^-7
        if func(mid ) == 0:
            return mid
        if func(mid ) * func(start ) < 0:
            # Sign change in [start, mid]: shrink the right end.
            end = mid
        else:
            start = mid
        mid = start + (end - start) / 2.0
    return mid
def a ( SCREAMING_SNAKE_CASE_ : float ):
    """Cubic test polynomial f(x) = x^3 - 2x - 5 (real root near 2.0946)."""
    cubed = SCREAMING_SNAKE_CASE_ ** 3
    return cubed - 2 * SCREAMING_SNAKE_CASE_ - 5
if __name__ == "__main__":
    # NOTE(review): `bisection` and `f` are both named `a` in this obfuscated
    # copy, so these names are undefined here; verify against upstream.
    print(bisection(f, 1, 1000))
    import doctest

    doctest.testmod()
| 364
|
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ):
    """Return True if the sentence contains every letter a-z at least once."""
    # Collect the distinct ASCII letters (case-folded), ignoring spaces.
    letters = {
        ch.lower()
        for ch in SCREAMING_SNAKE_CASE_.replace(''' ''' , '''''')
        if "a" <= ch.lower() <= "z"
    }
    # A pangram covers the full 26-letter alphabet.
    return len(letters ) == 2_6
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ):
    """Pangram check via a 26-slot seen-flag array (faster than set building).

    Fix: the original body assigned both branches to a throwaway local and
    then returned ``all(<input string>)``, which is True for every non-empty
    string — the flag array is restored so the result is actually the pangram
    test. ASCII ranges are compared directly so non-ASCII letters cannot
    index out of range.
    """
    seen = [False] * 2_6
    for char in SCREAMING_SNAKE_CASE_:
        if "a" <= char <= "z":
            seen[ord(char ) - 9_7] = True  # ord('a') == 97
        elif "A" <= char <= "Z":
            seen[ord(char ) - 6_5] = True  # ord('A') == 65
    # True only when every alphabet slot was hit.
    return all(seen )
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ):
    """Pangram check via a set of the lower-cased alphabetic characters."""
    alpha_chars = set()
    for char in SCREAMING_SNAKE_CASE_.lower():
        if char.isalpha():
            alpha_chars.add(char )
    # Exactly 26 distinct letters means the full alphabet is covered.
    return len(alpha_chars ) == 2_6
def a ( ):
    """Benchmark the three pangram implementations with ``timeit``."""
    from timeit import timeit

    setup_code = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
    # Time each implementation in turn and print the result.
    for statement in ('''is_pangram()''' , '''is_pangram_faster()''' , '''is_pangram_fastest()'''):
        print(timeit(statement , setup=setup_code ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): `benchmark` is named `a` in this obfuscated copy.
    benchmark()
| 315
| 0
|
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase_ ( _a):
    """Unconditional latent-diffusion pipeline: DDIM-style denoising in latent
    space followed by VQ-VAE decoding to pixel space.

    NOTE(review): locals look mangled (assignments to ``UpperCamelCase`` with
    later reads of ``latents``/``image``/``accepts_eta``); verify against the
    upstream ``LDMPipeline`` in diffusers.
    """

    def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
        """Register the VQ-VAE, UNet and scheduler as pipeline modules."""
        super().__init__()
        self.register_modules(vqvae=__SCREAMING_SNAKE_CASE , unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )

    @torch.no_grad()
    def __call__( self , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = 50 , __SCREAMING_SNAKE_CASE = "pil" , __SCREAMING_SNAKE_CASE = True , **__SCREAMING_SNAKE_CASE , ):
        """Run the denoising loop and decode the final latents to images."""
        # Sample the initial latent noise for the whole batch.
        UpperCamelCase : Union[str, Any] = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=__SCREAMING_SNAKE_CASE , )
        UpperCamelCase : Any = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        UpperCamelCase : Tuple = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        UpperCamelCase : Tuple = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        UpperCamelCase : Union[str, Any] = {}
        if accepts_eta:
            UpperCamelCase : Optional[Any] = eta
        for t in self.progress_bar(self.scheduler.timesteps ):
            UpperCamelCase : Dict = self.scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            # predict the noise residual
            UpperCamelCase : Any = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).sample
            # compute the previous noisy sample x_t -> x_t-1
            UpperCamelCase : int = self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
        # decode the image latents with the VAE
        UpperCamelCase : str = self.vqvae.decode(__SCREAMING_SNAKE_CASE ).sample
        # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL.
        UpperCamelCase : str = (image / 2 + 0.5).clamp(0 , 1 )
        UpperCamelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            UpperCamelCase : List[Any] = self.numpy_to_pil(__SCREAMING_SNAKE_CASE )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE )
| 365
|
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
__UpperCAmelCase : Union[str, Any] = logging.getLogger()
def a ( ):
    """Return the value passed via the ``-f`` command-line flag."""
    parser = argparse.ArgumentParser()
    parser.add_argument('''-f''' )
    parsed = parser.parse_args()
    return parsed.f
class UpperCAmelCase_ ( _a):
    """End-to-end tests for the DeeBERT early-exit example scripts (MRPC task).

    NOTE(review): locals are mangled in this copy (assignments to
    ``UpperCamelCase`` followed by reads of ``n_gpu``/``args``/``result``);
    verify against the upstream ``examples/research_projects/deebert`` tests.
    """

    def _lowercase ( self ):
        """Mirror the example script's logging to stdout so pytest captures it."""
        UpperCamelCase : List[str] = logging.StreamHandler(sys.stdout )
        logger.addHandler(__SCREAMING_SNAKE_CASE )

    def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
        """Run ``run_glue_deebert.main()`` with the given argv and check metrics."""
        UpperCamelCase : Dict = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0 , '''run_glue_deebert.py''' )
            with patch.object(__SCREAMING_SNAKE_CASE , '''argv''' , __SCREAMING_SNAKE_CASE ):
                UpperCamelCase : int = run_glue_deebert.main()
                # Every reported metric must clear the 2/3 floor.
                for value in result.values():
                    self.assertGreaterEqual(__SCREAMING_SNAKE_CASE , 0.666 )

    @slow
    @require_torch_non_multi_gpu
    def _lowercase ( self ):
        """Two-stage train, highway eval, and entropy-based early-exit eval."""
        # Stage 1: fine-tune roberta-base on MRPC with two-stage training.
        UpperCamelCase : Any = '''
            --model_type roberta
            --model_name_or_path roberta-base
            --task_name MRPC
            --do_train
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --max_seq_length 128
            --per_gpu_eval_batch_size=1
            --per_gpu_train_batch_size=8
            --learning_rate 2e-4
            --num_train_epochs 3
            --overwrite_output_dir
            --seed 42
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --save_steps 0
            --overwrite_cache
            --eval_after_first_stage
            '''.split()
        self.run_and_check(__SCREAMING_SNAKE_CASE )
        # Stage 2: evaluate each highway (early-exit layer) of the trained model.
        UpperCamelCase : Dict = '''
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --eval_each_highway
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            '''.split()
        self.run_and_check(__SCREAMING_SNAKE_CASE )
        # Stage 3: evaluate with an entropy threshold for early exit.
        UpperCamelCase : Union[str, Any] = '''
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --early_exit_entropy 0.1
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            '''.split()
        self.run_and_check(__SCREAMING_SNAKE_CASE )
| 315
| 0
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class UpperCAmelCase_ :
    """Builds tiny EsmFold configs and dummy inputs for the model tests below.

    NOTE(review): the original was name-mangled — every local/attribute was
    rewritten to ``UpperCamelCase`` (so ``self.*`` was never assigned) and all
    methods shared the name ``_lowercase`` (so later defs shadowed earlier
    ones).  Names are restored from the bodies' own reads and from the callers
    in the test class below.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # Store every knob on self so the prepare_*/get_config helpers can read it.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """A minimal EsmConfig in folding mode (tiny 2-block trunk, fp16 disabled)."""
        return EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,  # this tester exercises EsmForProteinFolding
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Run the folding model and check the positions/angles output shapes."""
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common ModelTesterMixin tests: returns (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


# Backwards-compatible alias: the test class below instantiates `EsmFoldModelTester`.
EsmFoldModelTester = UpperCAmelCase_
@require_torch
class UpperCAmelCase_ ( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for ESMFold.

    NOTE(review): the mangled original declared duplicate bases ``(_a, _a,
    unittest.TestCase)`` (a TypeError at class creation), gave all class
    attributes one shared mangled name, and named every method ``_lowercase``
    (so later defs shadowed earlier ones).  The bases, the attributes required
    by the ModelTesterMixin contract, and the three recoverable methods
    (setUp / config test / model test) are restored.  The skip placeholders
    keep the mangled name because their original test names cannot be
    recovered from this file — TODO restore from the upstream test suite.
    """

    # Attribute names assumed from the ModelTesterMixin contract — TODO confirm flags.
    test_mismatched_shapes = False
    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    # The placeholders below all share one (mangled) name, so only the last
    # definition survives on the class; they are kept to preserve the skip reasons.
    @unittest.skip("Does not support attention outputs")
    def _lowercase(self):
        pass

    @unittest.skip
    def _lowercase(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def _lowercase(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def _lowercase(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def _lowercase(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def _lowercase(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def _lowercase(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def _lowercase(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def _lowercase(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def _lowercase(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def _lowercase(self):
        pass

    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def _lowercase(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def _lowercase(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def _lowercase(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def _lowercase(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def _lowercase(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def _lowercase(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def _lowercase(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def _lowercase(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def _lowercase(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def _lowercase(self):
        pass
@require_torch
class UpperCAmelCase_ ( TestCasePlus):
    """Slow integration test that runs the pretrained ESMFold checkpoint.

    NOTE(review): base restored from mangled ``_a`` to ``TestCasePlus`` (the
    only TestCase helper imported by this file); locals restored from their
    reads in the body.
    """

    @slow
    def _lowercase(self):
        # NOTE(review): mangled method name kept for interface stability, but
        # unittest discovery needs a `test_`-prefixed name — original name not
        # recoverable from this file.
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        # dtype was mangled to `torch.floataa`; float32 assumed since the model
        # itself is cast with .float() — TODO confirm against upstream.
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
| 366
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger (name mangled upstream; nothing in the visible code reads it directly).
__UpperCAmelCase : Tuple = logging.get_logger(__name__)
# Map of pretrained I-BERT checkpoints to their hosted config.json files.
# NOTE(review): presumably the PRETRAINED_CONFIG_ARCHIVE_MAP — intended name not
# recoverable from this file; confirm before relying on it elsewhere.
__UpperCAmelCase : Union[str, Any] = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}
class UpperCAmelCase_ ( PretrainedConfig):
    """Configuration for I-BERT (integer-only BERT) models.

    NOTE(review): the mangled original assigned every constructor argument to a
    throwaway local, so no ``self.*`` attribute was ever set; the base was the
    undefined ``_a`` (restored to the ``PretrainedConfig`` this file imports).
    """

    # PretrainedConfig registers configs by `model_type`; the mangled original
    # bound the "ibert" string to an unrelated class attribute.
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # I-BERT specific: integer-only quantization mode and selective dequantization.
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class UpperCAmelCase_ ( OnnxConfig):
    """ONNX export configuration for I-BERT."""

    @property
    def inputs(self):
        """Dynamic-axis spec for the ONNX graph inputs.

        NOTE(review): the mangled original bound the axis dict to a throwaway
        name and then read the undefined ``dynamic_axis``; the property name is
        restored to ``inputs`` (the abstract property OnnxConfig requires).
        """
        # Multiple-choice inputs are (batch, choice, sequence); everything else is (batch, sequence).
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    # Backwards-compatible alias for the mangled name this property carried.
    _lowercase = inputs
| 315
| 0
|
def a(numbers: list[int]) -> list[int]:
    """Sort `numbers` in place with exchange sort and return the same list.

    NOTE(review): the mangled original assigned the swap tuple to a throwaway
    local, so the list was never modified and came back unsorted.
    """
    n = len(numbers)
    for i in range(n):
        for j in range(i + 1, n):
            if numbers[j] < numbers[i]:
                # Exchange the out-of-order pair (this is the actual sort step).
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


# Backwards-compatible name used by the __main__ block.
exchange_sort = a

if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
| 367
|
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
__UpperCAmelCase : int = logging.get_logger(__name__)
class UpperCAmelCase_ ( FeatureExtractionMixin):
    """Feature extractor that turns raw HTML into node strings plus their XPaths
    (the inputs MarkupLM-style models expect).

    NOTE(review): the mangled original never bound any of the locals it read
    (``xpath_tags``, ``child``, ``nodes`` …) and all helper methods shared the
    name ``_lowercase``; names are restored from the ``self.`` call sites and
    the bodies' own reads.  Base restored from ``_a`` to the imported
    ``FeatureExtractionMixin``.
    """

    def __init__(self, **kwargs):
        # bs4 (BeautifulSoup) is required to parse the HTML.
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        """Walk from `element` up to the document root, collecting tag names and
        1-based sibling indices (0 means "only sibling with this tag")."""
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            # recursive=False: only direct children count as siblings for the
            # subscript (the mangled original passed the element itself here).
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        # The walk was root-ward; reverse to get root -> element order.
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        """Parse one HTML document into (node strings, tag paths, subscript paths)."""
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        stringaxtag_seq = []
        stringaxsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bsa.element.NavigableString:
                if type(element.parent) != bsa.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                stringaxtag_seq.append(xpath_tags)
                stringaxsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(stringaxtag_seq):
            raise ValueError('''Number of doc strings and xtags does not correspond''')
        if len(all_doc_strings) != len(stringaxsubs_seq):
            raise ValueError('''Number of doc strings and xsubs does not correspond''')

        return all_doc_strings, stringaxtag_seq, stringaxsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subs):
        """Render tag/subscript lists as an XPath string like /html/body/div[2]."""
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subs):
            xpath += f"""/{tagname}"""
            if subs != 0:
                xpath += f"""[{subs}]"""
        return xpath

    def __call__(self, html_strings):
        """Extract nodes and xpaths from one HTML string or a batch of them."""
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                '''HTML strings must of type `str`, `List[str]` (batch of examples), '''
                f"""but is of type {type(html_strings)}.""" )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, stringaxtag_seq, stringaxsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, stringaxtag_seq, stringaxsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {'''nodes''': nodes, '''xpaths''': xpaths}
        # tensor_type was mangled in the original; None (plain Python lists) assumed — TODO confirm.
        encoded_inputs = BatchFeature(data=data, tensor_type=None)
        return encoded_inputs
| 315
| 0
|
# Lookup table: decimal digit (as str) -> digit ** 5.
# NOTE(review): the mangled original bound this to a throwaway name while the
# function below read `DIGITS_FIFTH_POWER`, and both functions were named `a`
# (the second shadowed the first).
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Sum of the fifth powers of the decimal digits of `number`."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Project Euler style: sum all numbers in [1000, 1000000) equal to the sum
    of the fifth powers of their digits."""
    return sum(
        number
        for number in range(1_000, 1_000_000)
        if number == digits_fifth_powers_sum(number)
    )


# Backwards-compatible binding: `a` previously resolved to the last definition.
a = solution

if __name__ == "__main__":
    print(solution())
| 368
|
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
# Module logger and default device.
# NOTE(review): the mangled original bound both to `__UpperCAmelCase`, while
# the functions below read `logger` and `DEFAULT_DEVICE`; names restored.
logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def a(
    examples,
    out_file,
    model_name,
    batch_size=8,
    device=DEFAULT_DEVICE,
    fpaa=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
):
    """Run `model.generate` over `examples` in chunks, write one hypothesis per
    line to `out_file`, and return runtime metrics.

    NOTE(review): the mangled original declared every parameter with the same
    name (a SyntaxError) and never bound the locals it read; parameter names
    are restored from the keyword call in run_generate below.  The `fpaa`
    spelling (fp16) is kept because the caller passes it by keyword.
    """
    fout = Path(out_file).open('''w''', encoding='''utf-8''')
    model_name = str(model_name)
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name).to(device)
    if fpaa:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, '''prefix''', '''''') or ''''''
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors='''pt''', truncation=True, padding='''longest''').to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + '''\n''')
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}


# Name used by run_generate below.
generate_summaries_or_translations = a
def a():
    """Current local time formatted as 'YYYY-MM-DD HH:MM:SS' (used as the
    --info default in run_generate)."""
    return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''')


# The argument parser in run_generate refers to this helper by its real name;
# without the alias a later `def a(...)` shadows this function.
datetime_now = a
def a(verbose=True):
    """CLI entry point: generate summaries/translations for args.input_path,
    save them to args.save_path, and (optionally) score against a reference.

    NOTE(review): mangled argparse `type=`/`required=`/`default=` values and
    unbound locals restored from the reads in this body; `args.fpaa` corrected
    to `args.fp16` (the attribute `--fp16` actually produces).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('''model_name''', type=str, help='''like facebook/bart-large-cnn,t5-base, etc.''')
    parser.add_argument('''input_path''', type=str, help='''like cnn_dm/test.source''')
    parser.add_argument('''save_path''', type=str, help='''where to save summaries''')
    parser.add_argument('''--reference_path''', type=str, required=False, help='''like cnn_dm/test.target''')
    parser.add_argument('''--score_path''', type=str, required=False, default='''metrics.json''', help='''where to save metrics''')
    parser.add_argument('''--device''', type=str, required=False, default=DEFAULT_DEVICE, help='''cuda, cuda:1, cpu etc.''')
    parser.add_argument(
        '''--prefix''', type=str, required=False, default=None, help='''will be added to the begininng of src examples''')
    parser.add_argument('''--task''', type=str, default='''summarization''', help='''used for task_specific_params + metrics''')
    parser.add_argument('''--bs''', type=int, default=8, required=False, help='''batch size''')
    parser.add_argument(
        '''--n_obs''', type=int, default=-1, required=False, help='''How many observations. Defaults to all.''')
    parser.add_argument('''--fp16''', action='''store_true''')
    parser.add_argument('''--dump-args''', action='''store_true''', help='''print the custom hparams with the results''')
    parser.add_argument(
        '''--info''', nargs='''?''', type=str, const=datetime_now(), help=(
            '''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
            ''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
        ), )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"""parsed the following generate kwargs: {parsed_args}""")
    # T5 checkpoints expect a leading space before the source text.
    examples = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError('''Can\'t mix --fp16 and --device cpu''')
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fpaa=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if '''translation''' in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, '''w'''))
    return scores


# Name used by the __main__ guard below.
run_generate = a

if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
| 315
| 0
|
def a(m: int) -> int:
    """Count the integer partitions of `m` (for m >= 1) via dynamic programming.

    NOTE(review): the mangled original read the undefined `m`/`memo` and
    replaced the `memo[i][0] = 1` base-case initialization with an assignment
    to a throwaway local, so the table was never seeded.
    """
    # memo[n][k]: ways to partition n using parts bounded by the column index.
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1  # base case: one way to partition with the smallest part

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


# Backwards-compatible name used by the __main__ block.
partition = a

if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
| 369
|
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class UpperCAmelCase_ ( ProcessorMixin):
    """Processor bundling a Donut image processor and tokenizer, including the
    token-sequence -> JSON decoder used for document parsing.

    NOTE(review): the mangled original never bound the locals it read and gave
    several methods one shared name; names are restored from the ``self.``
    call sites (``tokenajson`` calls itself recursively) and from the
    ProcessorMixin contract (``attributes``/``*_class`` class attributes).
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('''feature_extractor''')

        # Fall back to the deprecated kwarg when no image processor was given.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Prepare images and/or text; with both, tokenized text becomes `labels`."""
        # For backwards compatibility: inside as_target_processor, forward everything
        # to whichever processor is current.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop('''images''', None)
        text = kwargs.pop('''text''', None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError('''You need to specify either an `images` or `text` input to process.''')

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings['''input_ids''']
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Deprecated: temporarily make the tokenizer the current processor."""
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your images inputs, or in a separate call.''')
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def tokenajson(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a Donut token sequence like <s_key>value</s_key> into a dict
        (or list of dicts for <sep/>-joined siblings)."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}
        while tokens:
            start_token = re.search(R'''<s_(.*?)>''', tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(Rf"""</s_{key}>""", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                # Unclosed tag: drop it and keep scanning.
                tokens = tokens.replace(start_token, '''''')
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.tokenajson(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(R'''<sep/>'''):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]
                # Advance past the closing tag and recurse on <sep/>-joined siblings.
                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        """Deprecated alias for image_processor_class."""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias for image_processor."""
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''',
            FutureWarning,
        )
        return self.image_processor
| 315
| 0
|
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def a(image):
    """Resize a PIL image to side lengths that are multiples of 32 and return a
    torch tensor in NCHW layout scaled to [-1, 1].

    NOTE(review): the mangled original never bound `w`/`h` and re-bound each
    intermediate to a throwaway name; `np.floataa` (nonexistent) restored to
    np.float32 to match the /255.0 normalization.
    """
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION['''lanczos'''])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)  # HWC -> NCHW with a batch dim
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


# Name used inside the pipeline's __call__ below.
preprocess = a
class UpperCAmelCase_ ( DiffusionPipeline):
    """Latent-diffusion super-resolution pipeline (VQ-VAE + UNet + scheduler).

    Given a low-resolution image, iteratively denoises a latent that is
    concatenated channel-wise with the conditioning image, then decodes the
    result with the VQ-VAE.

    Fixes relative to the mangled original: base class restored to the imported
    `DiffusionPipeline` (the previous base `_a` was undefined), duplicate
    parameter names removed (a SyntaxError), and locals such as `height`,
    `width` and `latents` that were read but never bound are restored.
    """

    def __init__( self , vqvae: VQModel , unet: UNetaDModel , scheduler ):
        """Register the three sub-modules. `scheduler` may be any of the
        schedulers imported at the top of this file."""
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , image = None , batch_size: int = 1 , num_inference_steps: int = 100 , eta: float = 0.0 , generator = None , output_type: str = "pil" , return_dict: bool = True , ):
        """Run super-resolution on `image` (PIL image or NCHW tensor).

        Returns an `ImagePipelineOutput` (or a 1-tuple when `return_dict` is
        False). Raises ValueError for unsupported `image` types.
        """
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image )}""" )

        if isinstance(image , PIL.Image.Image ):
            # `a` is the module-level preprocessing helper defined above.
            image = a(image )

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters() ).dtype

        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        image = image.to(device=self.device , dtype=latents_dtype )

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # eta (η) is only used with the DDIMScheduler; it is ignored by other
        # schedulers, so only forward it when the scheduler accepts it.
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor ):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1 )
            latents_input = self.scheduler.scale_model_input(latents_input , t )
            # predict the noise residual
            noise_pred = self.unet(latents_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs ).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents ).sample
        image = torch.clamp(image , -1.0 , 1.0 )
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image )
| 370
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure mapping submodule name -> public symbols.
# Fixes: the structure was bound to a clobbered throwaway name while
# `_import_structure` was read below (NameError); the torch-only symbol list
# overwrote the whole dict instead of being added as a key; and the
# `_LazyModule` proxy was never installed into `sys.modules`.
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling symbols are only importable when torch is installed.
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 315
| 0
|
def a(list_data: list, length: int = 0) -> list:
    """Recursively bubble-sort `list_data` in place and return it.

    One pass bubbles the largest element of the first `length` items to the
    end; if any swap happened, recurse on the prefix of length `length - 1`.

    Fixes: the parameter list repeated one name (a SyntaxError); the swap was
    collapsed into a single tuple assignment to a throwaway local (so nothing
    was actually swapped); `swapped` was read but never bound; and the
    recursive call targeted an undefined name `bubble_sort` — it now recurses
    into this function (`a`).
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else a(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 371
|
def a(length: int = 5_0) -> int:
    """Count the ways to fill a row of `length` units with blocks of length >= 3
    separated by at least one empty unit (Project-Euler-114-style recurrence).

    `ways_number[n]` accumulates, for each block length and start position, the
    ways to fill the remainder of the row after that block plus a separator.

    Fixes: the table and the `length` parameter were bound to throwaway names
    while the loop body read `ways_number`/`length` (NameError), and the
    `__main__` guard called an undefined name `solution`.
    """
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]


if __name__ == "__main__":
    print(f"""{a() = }""")
| 315
| 0
|
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 350
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class UpperCAmelCase_ ( _a):
    """Trainer specialization whose metrics require post-processing the raw model
    predictions (question-answering style), built around the HF `Trainer`
    evaluation/prediction loops.

    NOTE(review): this block looks machine-renamed and is not runnable as-is:
    the base class `_a` is not defined in this file; both public methods are
    named `_lowercase` (the second definition shadows the first); every
    signature repeats the parameter name `__SCREAMING_SNAKE_CASE`, which is a
    SyntaxError; and most locals are bound to the throwaway name
    `UpperCamelCase` while later lines read the intended names (`eval_dataset`,
    `output`, `metrics`, `predictions`, ...). Comments below describe the
    apparent intent of each step.
    """
    def __init__( self , *__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ):
        """Store the eval examples and the post-processing callback; defer the rest to Trainer."""
        super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
        # Intended: self.eval_examples / self.post_process_function.
        # NOTE(review): `eval_examples` and `post_process_function` are unbound names here.
        UpperCamelCase : Any = eval_examples
        UpperCamelCase : Optional[Any] = post_process_function
    def _lowercase ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = "eval" ):
        """Evaluate: run the eval loop with metric computation disabled, then
        post-process predictions, compute and log the prefixed metrics, and fire
        the on_evaluate callback."""
        UpperCamelCase : int = self.eval_dataset if eval_dataset is None else eval_dataset
        UpperCamelCase : int = self.get_eval_dataloader(__SCREAMING_SNAKE_CASE )
        UpperCamelCase : str = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        UpperCamelCase : Any = self.compute_metrics
        UpperCamelCase : List[Any] = None
        UpperCamelCase : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        UpperCamelCase : Dict = time.time()
        try:
            UpperCamelCase : str = eval_loop(
                __SCREAMING_SNAKE_CASE , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , )
        finally:
            # Always restore the metric callback, even if the eval loop raised.
            UpperCamelCase : Union[str, Any] = compute_metrics
        UpperCamelCase : Any = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        # Attach throughput metrics (runtime, samples/s, steps/s).
        output.metrics.update(
            speed_metrics(
                __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            UpperCamelCase : Dict = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , output.predictions )
            UpperCamelCase : Optional[Any] = self.compute_metrics(__SCREAMING_SNAKE_CASE )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(f"""{metric_key_prefix}_""" ):
                    UpperCamelCase : Dict = metrics.pop(__SCREAMING_SNAKE_CASE )
            metrics.update(output.metrics )
        else:
            UpperCamelCase : List[Any] = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(__SCREAMING_SNAKE_CASE )
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        # Intended: self.control = self.callback_handler.on_evaluate(...).
        # NOTE(review): the result is bound to a throwaway name here.
        UpperCamelCase : Any = self.callback_handler.on_evaluate(self.args , self.state , self.control , __SCREAMING_SNAKE_CASE )
        return metrics
    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = "test" ):
        """Predict: same flow as evaluate but over a test dataloader; returns a
        PredictionOutput with post-processed predictions and prefixed metrics."""
        UpperCamelCase : Tuple = self.get_test_dataloader(__SCREAMING_SNAKE_CASE )
        # Temporarily disable metric computation, we will do it in the loop here.
        UpperCamelCase : Union[str, Any] = self.compute_metrics
        UpperCamelCase : Tuple = None
        UpperCamelCase : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        UpperCamelCase : Optional[int] = time.time()
        try:
            UpperCamelCase : int = eval_loop(
                __SCREAMING_SNAKE_CASE , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , )
        finally:
            UpperCamelCase : int = compute_metrics
        UpperCamelCase : Dict = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        UpperCamelCase : Dict = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , output.predictions , '''predict''' )
        UpperCamelCase : Union[str, Any] = self.compute_metrics(__SCREAMING_SNAKE_CASE )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(f"""{metric_key_prefix}_""" ):
                UpperCamelCase : Any = metrics.pop(__SCREAMING_SNAKE_CASE )
        metrics.update(output.metrics )
        # NOTE(review): `predictions` is never bound (the post-processed result went to a
        # throwaway name above), so this return raises NameError as written.
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__SCREAMING_SNAKE_CASE )
| 315
| 0
|
import heapq as hq
import math
from collections.abc import Iterator
class UpperCAmelCase_ :
    """A graph vertex for the Prim minimum-spanning-tree routines below.

    Fixes: the constructor bound every attribute to a throwaway local instead of
    `self` (so `id`, `key`, `pi`, `neighbors` and `edges` never existed); the
    two mutator methods were both named `_lowercase` (the second shadowed the
    first) while the in-file `connect` helper calls them as `add_neighbor` and
    `add_edge`; and `add_edge` repeated one parameter name (a SyntaxError).
    """

    def __init__( self , id_ ):
        self.id = str(id_ )
        self.key = None  # best known edge weight connecting this vertex to the tree
        self.pi = None  # parent vertex in the MST
        self.neighbors = []
        self.edges = {}  # {vertex id: edge weight}

    def __lt__( self , other ):
        # Vertices order by their current key (used by min() / heapq).
        return self.key < other.key

    def __repr__( self ):
        return self.id

    def add_neighbor( self , vertex ):
        """Register `vertex` as adjacent to this vertex."""
        self.neighbors.append(vertex )

    def add_edge( self , vertex , weight ):
        """Record the weight of the edge to `vertex` (keyed by its id)."""
        self.edges[vertex.id] = weight
def a(graph: list, a: int, b: int, edge: int) -> None:
    """Add an undirected weighted edge between vertices `a` and `b` (1-based
    indices into `graph`).

    Fixes: the parameter list repeated a single name (a SyntaxError) while the
    body read `graph`, `a` and `b`, which were unbound.
    """
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)
def a ( SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : Vertex ):
"""simple docstring"""
UpperCamelCase : List[Any] = []
for u in graph:
UpperCamelCase : Optional[int] = math.inf
UpperCamelCase : Optional[Any] = None
UpperCamelCase : List[Any] = 0
UpperCamelCase : Union[str, Any] = graph[:]
while q:
UpperCamelCase : Union[str, Any] = min(SCREAMING_SNAKE_CASE_ )
q.remove(SCREAMING_SNAKE_CASE_ )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
UpperCamelCase : List[str] = u
UpperCamelCase : List[str] = u.edges[v.id]
for i in range(1 , len(SCREAMING_SNAKE_CASE_ ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def a ( SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : Vertex ):
"""simple docstring"""
for u in graph:
UpperCamelCase : Optional[Any] = math.inf
UpperCamelCase : Union[str, Any] = None
UpperCamelCase : List[Any] = 0
UpperCamelCase : Union[str, Any] = list(SCREAMING_SNAKE_CASE_ )
hq.heapify(SCREAMING_SNAKE_CASE_ )
while h:
UpperCamelCase : List[Any] = hq.heappop(SCREAMING_SNAKE_CASE_ )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
UpperCamelCase : Optional[Any] = u
UpperCamelCase : Union[str, Any] = u.edges[v.id]
hq.heapify(SCREAMING_SNAKE_CASE_ )
for i in range(1 , len(SCREAMING_SNAKE_CASE_ ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def a ( ):
    """Placeholder test hook; the real checks run through the doctest call below."""
if __name__ == "__main__":
    # Run any doctests defined in this module when executed as a script.
    import doctest
    doctest.testmod()
| 351
|
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return the word's characters in sorted order (its anagram signature).

    Name restored to `signature` — the module-level code below calls
    `signature(word)`, which was undefined while this function was named `a`.
    """
    return "".join(sorted(word))


# Backward-compatible alias for the previous (mangled) binding name.
a = signature
def anagram(my_word: str) -> list[str]:
    """Return all known words sharing `my_word`'s signature (its anagrams).

    Name restored to `anagram` — the `__main__` block below calls
    `anagram(word)`, which was undefined while this function was named `a`.
    Looks up the module-level `word_by_signature` index.
    """
    return word_by_signature[signature(my_word)]


# Backward-compatible alias for the previous (mangled) binding name.
a = anagram
# Build the signature -> words index from the bundled word list.
# Fixes: `data`, `word_list` and `word_by_signature` were all bound to the same
# clobbered throwaway name while the lines below read those identifiers.
data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
| 315
| 0
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

# Tokenizer resource constants. Fixes: these were all bound to the same
# clobbered throwaway name while the class below reads `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP` and `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class UpperCAmelCase_ ( PreTrainedTokenizerFast):
    """Fast (Rust-backed) byte-level BPE tokenizer for GPT-NeoX-20B.

    Fixes relative to the mangled original: base class restored to the imported
    `PreTrainedTokenizerFast` (`_a` was undefined); the four class attributes
    were all bound to one clobbered name; `__init__` repeated a single
    parameter name (a SyntaxError) and never reassigned the backend
    pre-tokenizer; and the two methods read locals (`pre_tok_state`,
    `input_ids`) that were never bound.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
        """Build the fast tokenizer and sync the backend pre-tokenizer's
        add_prefix_space flag with the requested value."""
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """Save the backend tokenizer model files; return the written paths as a tuple."""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def _build_conversation_input_ids( self , conversation ):
        """Flatten a `Conversation` into input ids (each turn terminated by EOS),
        truncated from the left to the model max length."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 352
|
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose the rows of `source_data` into per-attribute columns of floats.

    Name restored to `get_data` — the aggregating function below calls
    `get_data(...)`, which was undefined while this was named `a`. Also fixes
    the body, which read `data_lists` without ever binding it.
    """
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            # Grow the column list lazily as wider rows appear.
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists
def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max normalize each attribute column, inverted when its weight is 0.

    weight == 1: score = (x - min) / (max - min); weight == 0: 1 minus that.
    A constant column (max == min) scores 0 (weight 1) or 1 (weight 0).
    Raises ValueError for any other weight.

    Name restored to `calculate_each_score` (called by the aggregator below);
    also fixes locals (`score_lists`, `mind`, `maxd`, `score`, `msg`) that were
    read but bound to a throwaway name.
    """
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"""Invalid weight of {weight:f} provided"""
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists
def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum per-attribute scores element-wise into one score per data point.

    Name restored to `generate_final_scores` (called by the aggregator below);
    also fixes `final_scores`, which was read without ever being bound.
    """
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def a(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Score every data point and append its final score to its row in place.

    Pipeline: transpose rows to columns, min-max score each column according to
    its weight, sum per-point, then append each point's score to its source row.
    Returns the mutated `source_data`.

    Fixes: the intermediate results were bound to throwaway names while the
    following lines read `data_lists`, `score_lists` and `final_scores`.
    """
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
| 315
| 0
|
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
    import bitsandbytes as bnb

# `deepcopy` is used unconditionally below (see get_keys_to_not_convert), so it
# must not be gated behind the bitsandbytes availability check.
from copy import deepcopy


# Module logger. Fix: it was bound to a clobbered throwaway name while every
# function below references it as `logger` (NameError).
logger = logging.getLogger(__name__)
def a ( SCREAMING_SNAKE_CASE_ : torch.nn.Module , SCREAMING_SNAKE_CASE_ : BnbQuantizationConfig , SCREAMING_SNAKE_CASE_ : Union[str, os.PathLike] = None , SCREAMING_SNAKE_CASE_ : Optional[Dict[str, Union[int, str, torch.device]]] = None , SCREAMING_SNAKE_CASE_ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE_ : Optional[Dict[Union[int, str], Union[int, str]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, os.PathLike]] = None , SCREAMING_SNAKE_CASE_ : bool = False , ):
    """Quantize a model with bitsandbytes, optionally loading checkpoint weights and dispatching it.

    NOTE(review): this block looks machine-renamed and is not runnable as-is —
    every parameter shares the name `SCREAMING_SNAKE_CASE_` (a SyntaxError), and
    most locals are bound to the throwaway name `UpperCamelCase` while later
    lines read the intended names (`load_in_abit`, `device_map`, `model`,
    `model_device`, ...). The comments below describe the apparent intent.
    """
    # Read the quantization flags from the config.
    # NOTE(review): both lines read the same attribute `load_in_abit`; the two
    # distinct 4-bit / 8-bit flags appear collapsed by the renaming — confirm.
    UpperCamelCase : Any = bnb_quantization_config.load_in_abit
    UpperCamelCase : List[Any] = bnb_quantization_config.load_in_abit
    if load_in_abit and not is_abit_bnb_available():
        raise ImportError(
            '''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
            ''' make sure you have the latest version of `bitsandbytes` installed.''' )
    if load_in_abit and not is_abit_bnb_available():
        raise ValueError(
            '''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
            '''make sure you have the latest version of `bitsandbytes` installed.''' )
    # Collect modules that must stay un-quantized.
    UpperCamelCase : List[Any] = []
    # custom device map
    if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(device_map.keys() ) > 1:
        UpperCamelCase : str = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        UpperCamelCase : Union[str, Any] = get_keys_to_not_convert(SCREAMING_SNAKE_CASE_ )
        # add cpu modules to skip modules only for 4-bit modules
        if load_in_abit:
            bnb_quantization_config.skip_modules.extend(SCREAMING_SNAKE_CASE_ )
    UpperCamelCase : Optional[int] = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fpaa_modules is None:
        UpperCamelCase : Any = []
    UpperCamelCase : Any = bnb_quantization_config.keep_in_fpaa_modules
    modules_to_not_convert.extend(SCREAMING_SNAKE_CASE_ )
    # compatibility with peft
    UpperCamelCase : List[Any] = load_in_abit
    UpperCamelCase : int = load_in_abit
    UpperCamelCase : Tuple = get_parameter_device(SCREAMING_SNAKE_CASE_ )
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            '''It is not recommended to quantize a loaded model. '''
            '''The model should be instantiated under the `init_empty_weights` context manager.''' )
        UpperCamelCase : str = replace_with_bnb_layers(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , modules_to_not_convert=SCREAMING_SNAKE_CASE_ )
        # convert param to the right dtype
        UpperCamelCase : List[str] = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
                param.to(torch.floataa )
                if param.dtype != torch.floataa:
                    UpperCamelCase : Union[str, Any] = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
                    UpperCamelCase : Dict = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
                    if param is not None:
                        param.to(torch.floataa )
            elif torch.is_floating_point(SCREAMING_SNAKE_CASE_ ):
                param.to(SCREAMING_SNAKE_CASE_ )
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device() )
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device() )
        else:
            raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
        logger.info(
            f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
            '''We move the model to cuda.''' )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
    else:
        # Model was instantiated empty: replace layers, compute a device map,
        # load the checkpoint, then dispatch across devices.
        with init_empty_weights():
            UpperCamelCase : Optional[int] = replace_with_bnb_layers(
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , modules_to_not_convert=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase : Optional[int] = get_quantized_model_device_map(
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , max_memory=SCREAMING_SNAKE_CASE_ , no_split_module_classes=SCREAMING_SNAKE_CASE_ , )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            UpperCamelCase : Optional[int] = True
        UpperCamelCase : List[Any] = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
        load_checkpoint_in_model(
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dtype=bnb_quantization_config.torch_dtype , offload_folder=SCREAMING_SNAKE_CASE_ , offload_state_dict=SCREAMING_SNAKE_CASE_ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
        return dispatch_model(SCREAMING_SNAKE_CASE_ , device_map=SCREAMING_SNAKE_CASE_ , offload_dir=SCREAMING_SNAKE_CASE_ )
def a ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any=None , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : Optional[int]=None ):
    """Resolve or validate a device map for the quantized model.

    Intent: default to the current CUDA device when no map is given; for string
    strategies ("auto"/"balanced"/...) infer a map with special dtypes for
    skipped and full-precision modules; for explicit dict maps, reject CPU/disk
    placement of quantized modules under 4-bit loading.

    NOTE(review): machine-renamed like the rest of this file — parameters share
    one name (a SyntaxError) and locals are bound to a throwaway name while
    later lines read `device_map`, `special_dtypes`, `kwargs`, `max_memory`,
    `modules_not_to_convert` and `device_map_without_some_modules`.
    """
    if device_map is None:
        if torch.cuda.is_available():
            UpperCamelCase : Any = {'''''': torch.cuda.current_device()}
        else:
            raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
        logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
    if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                '''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
                '''\'sequential\'.''' )
        # Skipped modules keep the configured torch dtype; full-precision
        # modules are pinned to float32 during map inference.
        UpperCamelCase : List[Any] = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules )
            } )
        special_dtypes.update(
            {
                name: torch.floataa
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
            } )
        UpperCamelCase : List[str] = {}
        UpperCamelCase : Dict = special_dtypes
        UpperCamelCase : Optional[Any] = no_split_module_classes
        UpperCamelCase : Tuple = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            UpperCamelCase : int = get_balanced_memory(
                SCREAMING_SNAKE_CASE_ , low_zero=(device_map == '''balanced_low_0''') , max_memory=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
        UpperCamelCase : Tuple = max_memory
        UpperCamelCase : Any = infer_auto_device_map(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
    if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
        # check if don't have any quantized module on the cpu
        UpperCamelCase : Tuple = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
        UpperCamelCase : Union[str, Any] = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_abit:
                    raise ValueError(
                        '''
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        ''' )
                else:
                    logger.info(
                        '''Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """Replace every eligible `nn.Linear` in `model` with a bitsandbytes linear layer.

    Thin wrapper around the recursive `_replace_with_bnb_layers`; logs a warning
    when nothing was replaced. Name restored from its call site in the loader
    above; also fixes the parameter list (one name repeated — a SyntaxError)
    and the recursion result, which was discarded while `has_been_replaced`
    was read.
    """
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name)
    if not has_been_replaced:
        logger.warning(
            '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
            ''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
            ''' Please double check your model architecture, or submit an issue on github if you think this is'''
            ''' a bug.''' )
    return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """Recursively swap `nn.Linear` children for bitsandbytes linear layers.

    Returns (model, has_been_replaced). Name restored from its call sites (the
    wrapper above and the recursive call); also restores locals that were bound
    to throwaway names while later lines read them (`current_key_name_str`,
    `proceed`, `bnb_module`, `has_been_replaced`).
    """
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = '''.'''.join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                # NOTE(review): both branches test the same `load_in_abit` flag, so
                # the second is unreachable as written — the 4-bit/8-bit flags look
                # collapsed by the renaming; confirm against the config class.
                if bnb_quantization_config.load_in_abit:
                    bnb_module = bnb.nn.LinearabitLt(
                        module.in_features, module.out_features, module.bias is not None, has_fpaa_weights=False, threshold=bnb_quantization_config.llm_inta_threshold, )
                elif bnb_quantization_config.load_in_abit:
                    bnb_module = bnb.nn.Linearabit(
                        module.in_features, module.out_features, module.bias is not None, bnb_quantization_config.bnb_abit_compute_dtype, compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant, quant_type=bnb_quantization_config.bnb_abit_quant_type, )
                else:
                    raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''')
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                # Quantized weights are frozen.
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name)
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def a ( SCREAMING_SNAKE_CASE_ : str ):
    """Collect the module names that must NOT be converted to bnb quantized
    layers: tied-weight parameters plus the model's output head (the last
    top-level child), with ``.weight``/``.bias`` suffixes stripped so the
    names refer to modules rather than parameters.

    Returns an empty list for base models (no head) without tied weights.

    NOTE(review): several locals referenced below (``tied_params``, ``model``,
    ``has_tied_params``, ``list_modules`` …) are never bound under those names
    in this obfuscated version — the assignments all target ``UpperCamelCase``.
    The intent appears to be accelerate's ``get_keys_to_not_convert``; verify
    against the original before relying on this function.
    """
    with init_empty_weights():
        UpperCamelCase : Any = deepcopy(SCREAMING_SNAKE_CASE_ )  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    # Tied parameters must stay un-quantized so the weight sharing survives.
    UpperCamelCase : Dict = find_tied_parameters(SCREAMING_SNAKE_CASE_ )
    # For compatibility with Accelerate < 0.18
    if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
        UpperCamelCase : str = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        UpperCamelCase : Dict = sum(SCREAMING_SNAKE_CASE_ , [] )
    UpperCamelCase : List[str] = len(SCREAMING_SNAKE_CASE_ ) > 0
    # Check if it is a base model
    UpperCamelCase : Optional[int] = False
    if hasattr(SCREAMING_SNAKE_CASE_ , '''base_model_prefix''' ):
        # A base model does not carry the attribute named by its own prefix.
        UpperCamelCase : Optional[Any] = not hasattr(SCREAMING_SNAKE_CASE_ , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    UpperCamelCase : Any = list(model.named_children() )
    UpperCamelCase : List[str] = [list_modules[-1][0]]
    # add last module together with tied weights
    UpperCamelCase : List[Any] = set(SCREAMING_SNAKE_CASE_ ) - set(SCREAMING_SNAKE_CASE_ )
    UpperCamelCase : str = list(set(SCREAMING_SNAKE_CASE_ ) ) + list(SCREAMING_SNAKE_CASE_ )
    # remove ".weight" from the keys
    UpperCamelCase : Optional[int] = ['''.weight''', '''.bias''']
    UpperCamelCase : Tuple = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                UpperCamelCase : Optional[Any] = name.replace(SCREAMING_SNAKE_CASE_ , '''''' )
        filtered_module_names.append(SCREAMING_SNAKE_CASE_ )
    return filtered_module_names
def a ( SCREAMING_SNAKE_CASE_ : List[Any] ):
    """Return ``True`` if the given model contains at least one bnb 4-bit linear layer.

    Fix: the original iterated an undefined global ``model`` and applied
    ``isinstance`` to the function argument instead of each submodule; it now
    inspects the submodules of its own argument.
    """
    for m in SCREAMING_SNAKE_CASE_.modules():
        if isinstance(m , bnb.nn.Linearabit ):
            return True
    return False
def a ( SCREAMING_SNAKE_CASE_ : nn.Module ):
    """Return the device of the first parameter of the given module.

    Fix: the original read an undefined name ``parameter`` instead of its own
    argument, which raised ``NameError`` on every call.
    Raises ``StopIteration`` if the module has no parameters.
    """
    return next(SCREAMING_SNAKE_CASE_.parameters() ).device
def a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
    """Offload one parameter (and, for int8 layers, its ``SCB`` scale tensor)
    to disk, then replace the in-memory tensor with a meta-device placeholder.

    NOTE(review): this mirrors accelerate's bnb ``quantize_and_offload``
    helper; the obfuscation reuses ``SCREAMING_SNAKE_CASE_`` for every
    positional argument and references ``param_name``/``model``/``param``
    etc., which are never bound under those names here — verify against the
    original before use.
    """
    if fpaa_statistics is None:
        # No fp16 statistics: materialize the quantized value on device 0 first.
        set_module_tensor_to_device(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 0 , dtype=SCREAMING_SNAKE_CASE_ , value=SCREAMING_SNAKE_CASE_ )
    UpperCamelCase : Dict = param_name
    UpperCamelCase : Union[str, Any] = model
    # Walk dotted names ("encoder.layer.0.weight") down to the owning module.
    if "." in tensor_name:
        UpperCamelCase : Any = tensor_name.split('''.''' )
        for split in splits[:-1]:
            UpperCamelCase : str = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
            if new_module is None:
                raise ValueError(F"""{module} has no attribute {split}.""" )
            UpperCamelCase : Any = new_module
        UpperCamelCase : int = splits[-1]
    # offload weights
    UpperCamelCase : List[Any] = False
    offload_weight(module._parameters[tensor_name] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , index=SCREAMING_SNAKE_CASE_ )
    if hasattr(module._parameters[tensor_name] , '''SCB''' ):
        # int8 layers keep their quantization scale in `.SCB`; offload it too.
        offload_weight(
            module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , SCREAMING_SNAKE_CASE_ , index=SCREAMING_SNAKE_CASE_ , )
    else:
        offload_weight(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , index=SCREAMING_SNAKE_CASE_ )
        offload_weight(SCREAMING_SNAKE_CASE_ , param_name.replace('''weight''' , '''SCB''' ) , SCREAMING_SNAKE_CASE_ , index=SCREAMING_SNAKE_CASE_ )
    # Free the real tensor: keep only an empty placeholder on the meta device.
    set_module_tensor_to_device(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''meta''' , dtype=SCREAMING_SNAKE_CASE_ , value=torch.empty(*param.size() ) )
| 353
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
# Script configuration (originally LABEL_DIR, IMAGE_DIR, OUTPUT_DIR, FLIP_TYPE).
# NOTE(review): the obfuscation gave all four constants the same name, so the
# later assignments shadow the earlier ones — verify against the original.
__UpperCAmelCase : Optional[int] = ""
__UpperCAmelCase : Union[str, Any] = ""
__UpperCAmelCase : Optional[int] = ""
__UpperCAmelCase : Any = 1  # (0 is vertical, 1 is horizontal)
def a ( ):
    """Flip every dataset image plus its YOLO annotations and write the
    results (JPEG at quality 85 and a matching ``.txt`` label file) under a
    randomized file name.

    NOTE(review): ``get_dataset``/``update_image_and_anno``/``random_chars``
    and the ``SCREAMING_SNAKE_CASE_`` globals are not defined under those
    names in this file as shown — presumably lost in an automated rename.
    """
    UpperCamelCase , UpperCamelCase : List[Any] = get_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    print('''Processing...''' )
    UpperCamelCase , UpperCamelCase , UpperCamelCase : Any = update_image_and_anno(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    for index, image in enumerate(SCREAMING_SNAKE_CASE_ ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        UpperCamelCase : Optional[int] = random_chars(3_2 )
        # Original stem of the source image, without its extension.
        UpperCamelCase : List[Any] = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        UpperCamelCase : int = F"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
        # NOTE(review): the leading "/" makes this an absolute path at the
        # filesystem root — looks like a bug; likely should be f"{file_root}.jpg".
        cva.imwrite(F"""/{file_root}.jpg""" , SCREAMING_SNAKE_CASE_ , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
        print(F"""Success {index+1}/{len(SCREAMING_SNAKE_CASE_ )} with {file_name}""" )
        UpperCamelCase : Any = []
        for anno in new_annos[index]:
            # One YOLO line per box: "class x_center y_center width height".
            UpperCamelCase : Tuple = F"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
            annos_list.append(SCREAMING_SNAKE_CASE_ )
        # NOTE(review): same leading "/" issue as the imwrite call above.
        with open(F"""/{file_root}.txt""" , '''w''' ) as outfile:
            outfile.write('''\n'''.join(line for line in annos_list ) )
def a ( label_dir : str , img_dir : str ):
    """Read YOLO-format label files from ``label_dir`` and pair each with its
    image path in ``img_dir``.

    Fix: the original declared the same parameter name twice, which is a
    ``SyntaxError``; the two directories are now distinct parameters.

    Each returned label is a list of boxes ``[class_id, x_center, y_center,
    width, height]``. Label files with no boxes are skipped entirely.

    Returns:
        (img_paths, labels) — parallel lists.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , '''*.txt''' ) ):
        # File stem, e.g. "img1" from ".../img1.txt".
        label_name = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , F"""{label_name}.jpg""" )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('''\n''' ).split(''' ''' )
            boxes.append(
                [
                    int(obj[0] ),
                    float(obj[1] ),
                    float(obj[2] ),
                    float(obj[3] ),
                    float(obj[4] ),
                ] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
def a ( img_list : list , anno_list : list , flip_type : int = 1 ):
    """Flip every image and mirror its YOLO boxes accordingly.

    Fix: the original declared the same parameter name three times, which is
    a ``SyntaxError``; the parameters are now distinct.

    Args:
        img_list: paths of the source images.
        anno_list: per-image lists of boxes [class, x_c, y_c, w, h] (normalized).
        flip_type: 1 = horizontal flip (mirror x), 0 = vertical flip (mirror y).

    Returns:
        (new_images, new_annotations, source_paths) — parallel lists.
    """
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list ) ):
        new_annos = []
        path = img_list[idx]
        path_list.append(path )
        img_annos = anno_list[idx]
        img = cva.imread(path )
        if flip_type == 1:
            new_img = cva.flip(img , flip_type )
            for bbox in img_annos:
                # Horizontal flip mirrors the x-center.
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
        elif flip_type == 0:
            new_img = cva.flip(img , flip_type )
            for bbox in img_annos:
                # Vertical flip mirrors the y-center.
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
        # NOTE(review): any other flip_type leaves new_img unbound and raises
        # NameError here, matching the original's behavior.
        new_annos_lists.append(new_annos )
        new_imgs_list.append(new_img )
    return new_imgs_list, new_annos_lists, path_list
def a ( number_char : int = 3_2 ):
    """Return a random string of ``number_char`` lowercase letters and digits.

    Fix: the original's assert referenced an undefined name ``number_char``
    (the parameter had been renamed away), and ``random.choice`` was applied
    to the count instead of the alphabet.
    """
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 315
| 0
|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class UpperCAmelCase_ :
    """Builds a tiny MBart config and matching TF input tensors for the model
    tests in this file (the conventional ``TFMBartModelTester`` helper).

    Fixes: the original obfuscation gave ``__init__`` and the methods
    duplicate parameter names (a ``SyntaxError``), dropped every ``self.``
    on the attribute assignments, and renamed the methods away from the
    names the test class below calls.
    """

    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Return ``(config, inputs_dict)`` for a tiny MBart model."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        # Guarantee every sequence ends with EOS.
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Run the decoder once with caching enabled and extract the cache."""
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]


# Alias so code that refers to the conventional helper name resolves.
TFMBartModelTester = UpperCAmelCase_
def a (
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Fill in any missing MBart inputs (masks derived from the pad token,
    all-ones head masks) and return the full model input dict.

    Fixes: the original declared the same parameter name for every argument
    (a ``SyntaxError``) and referenced the nonexistent dtype ``tf.inta``
    (restored to ``tf.int8``).
    """
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # Always attend to the decoder-start token; mask padding elsewhere.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


# Alias so the tester class, which calls this helper by its conventional
# name, resolves even though the function itself is named `a` here.
prepare_mbart_inputs_dict = a
@require_tf
class UpperCAmelCase_ ( TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model-level tests for TF MBart, wired into the common tester mixins.

    Fixes: the original's base classes were the undefined name ``_a`` (the
    mixins are imported above under their real names), the class attributes
    the mixins read were renamed away, and ``is_pipeline_test_to_skip``
    declared duplicate parameter names (a ``SyntaxError``).
    """

    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        """Skip every pipeline test except feature extraction."""
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
    """Slow integration test: en->ro translation with the pretrained
    facebook/mbart-large-en-ro checkpoint.

    Fixes: the original renamed every method/attribute to ``_lowercase`` /
    ``__UpperCamelCase`` even though the bodies still call them by their
    real names (``self.tokenizer``, ``self.model``, ``self.src_text``,
    ``self.translate_src_text`` …); the names are restored so the class is
    internally consistent.
    """

    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        """Tokenize ``src_text``, generate with beam search, and decode."""
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors='''tf''')
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 354
|
import qiskit
def a ( bit_a : int , bit_b : int ):
    """Simulate a quantum half adder for two classical input bits.

    Fix: the original declared the same parameter name for both inputs,
    which is a ``SyntaxError`` in Python; the bits are now distinct.

    Qubit 2 receives the XOR (sum bit), qubit 3 the AND (carry bit).
    Returns the measurement histogram from 1000 shots of the simulator.
    """
    sim = qiskit.Aer.get_backend('''aer_simulator''' )
    qc_ha = qiskit.QuantumCircuit(4 , 2 )
    # encode inputs in qubits 0 and 1
    if bit_a == 1:
        qc_ha.x(0 )
    if bit_b == 1:
        qc_ha.x(1 )
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0 , 2 )
    qc_ha.cx(1 , 2 )
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0 , 1 , 3 )
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2 , 0 )  # extract XOR value
    qc_ha.measure(3 , 1 )  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha , sim , shots=1_0_0_0 )
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha )
if __name__ == "__main__":
__UpperCAmelCase : int = half_adder(1, 1)
print(f'''Half Adder Output Qubit Counts: {counts}''')
| 315
| 0
|
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger (conventionally named `logger`).
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
# Checkpoint name -> hosted config URL (conventionally
# XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP).
# NOTE(review): both assignments bind the same obfuscated name, so the dict
# shadows the logger — verify against the original file.
__UpperCAmelCase : Any = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}
class UpperCAmelCase_ ( _a):
    """XLM-ProphetNet model configuration (canonical ``XLMProphetNetConfig``
    layout).

    Fixes: the original's ``__init__`` declared every parameter under the
    same name (a ``SyntaxError``), the ``self.`` prefixes on the attribute
    assignments were lost, and the ``num_hidden_layers`` property/setter pair
    was renamed so the ``@num_hidden_layers.setter`` decorator referenced an
    undefined name.
    """

    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30_522,
        hidden_size: Optional[int] = 1_024,
        encoder_ffn_dim: Optional[int] = 4_096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4_096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function
        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps
        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        """Total transformer depth: encoder plus decoder layers."""
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            '''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'''
            ''' `num_decoder_layers`.'''
        )
| 355
|
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
# Module logger (conventionally named `logger`).
__UpperCAmelCase : str = logging.get_logger(__name__)
def a ( image_embeds , text_embeds ):
    """Return the cosine-similarity matrix between two batches of embeddings.

    Fix: the original declared the same parameter name twice (a
    ``SyntaxError``) and referenced ``normalized_text_embeds`` without ever
    binding it; both rows are now L2-normalized and multiplied.

    Shapes: (n, d) x (m, d) -> (n, m).
    """
    normalized_image_embeds = nn.functional.normalize(image_embeds )
    normalized_text_embeds = nn.functional.normalize(text_embeds )
    return torch.mm(normalized_image_embeds , normalized_text_embeds.t() )


# Alias: the safety-checker class below calls this helper by its
# conventional name.
cosine_distance = a
class UpperCAmelCase_ ( _a):
    """CLIP-based NSFW safety checker (canonical
    ``StableDiffusionSafetyChecker`` layout).

    Fixes: the original's two forward methods declared duplicate parameter
    names (a ``SyntaxError``), every ``self.`` on the ``__init__``
    assignments was lost, and the class attributes the base-model machinery
    reads were renamed away.
    """

    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config):
        super().__init__(config)
        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)
        # Fixed (non-trainable) concept embeddings and per-concept thresholds;
        # real values come from the pretrained checkpoint.
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        """Score images against the NSFW concepts; return (images, flags)."""
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()
        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []}
            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0
            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]})
                    # Tighten the regular-concept thresholds for flagged images.
                    adjustment = 0.01
            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)
            result.append(result_img)
        has_nsfw_concepts = [len(res['''bad_concepts''']) > 0 for res in result]
        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input, images):
        """Vectorized (ONNX-friendly) variant of :meth:`forward`."""
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])
        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)
        return images, has_nsfw_concepts
| 315
| 0
|
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class UpperCAmelCase_ ( nn.Module):
    """Flax cross-attention down block: ``num_layers`` x (ResNet ->
    Transformer2D), optionally followed by a downsampler.

    Fixes: the original declared every dataclass field under the same
    obfuscated name, renamed linen's required ``setup`` method, dropped the
    ``self.`` on the submodule assignments, and referenced the nonexistent
    dtype ``jnp.floataa`` (restored to ``jnp.float32``).
    """

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            # Only the first resnet changes the channel count.
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class UpperCAmelCase_ ( nn.Module):
    """Flax plain down block: ``num_layers`` ResNets, optionally followed by
    a downsampler.

    Fixes: duplicate obfuscated dataclass field names, linen's ``setup``
    renamed away, lost ``self.`` assignments, and the nonexistent dtype
    ``jnp.floataa`` (restored to ``jnp.float32``).
    """

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            # Only the first resnet changes the channel count.
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class UpperCAmelCase_ ( nn.Module):
    """Flax cross-attention up block: ``num_layers`` x (skip-concat ->
    ResNet -> Transformer2D), optionally followed by an upsampler.

    Fixes: duplicate obfuscated dataclass field names, linen's ``setup``
    renamed away, lost ``self.`` assignments, and the nonexistent dtype
    ``jnp.floataa`` (restored to ``jnp.float32``).
    """

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            # Channel counts of the down-path skip connection and of this
            # level's previous output.
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states)
        return hidden_states
class UpperCAmelCase_ ( nn.Module):
    """Flax plain up block: ``num_layers`` x (skip-concat -> ResNet),
    optionally followed by an upsampler.

    Fixes: duplicate obfuscated dataclass field names, linen's ``setup``
    renamed away, lost ``self.`` assignments, and the nonexistent dtype
    ``jnp.floataa`` (restored to ``jnp.float32``).
    """

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            # Channel counts of the down-path skip connection and of this
            # level's previous output.
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states)
        return hidden_states
class UpperCAmelCase_ ( nn.Module):
    """Flax UNet mid block with cross attention: a leading ResNet followed by
    ``num_layers`` x (Transformer2D -> ResNet).

    Fixes: duplicate obfuscated dataclass field names, linen's ``setup``
    renamed away, lost ``self.`` assignments, and the nonexistent dtype
    ``jnp.floataa`` (restored to ``jnp.float32``).
    """

    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # There is always at least one resnet (applied before the first
        # attention block).
        resnets = [
            FlaxResnetBlockaD(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]
        attentions = []
        for _ in range(self.num_layers):
            attn_block = FlaxTransformeraDModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)
            res_block = FlaxResnetBlockaD(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        return hidden_states
# ---- file-boundary marker (replaces stray non-Python residue) ----
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    """Entry point for the ``diffusers-cli`` tool.

    Builds the argument parser, registers the available sub-commands,
    parses ``sys.argv`` and dispatches to the chosen command's ``run()``.

    NOTE(review): renamed from the placeholder ``a`` to ``main``, the name the
    ``__main__`` guard below already calls; locals restored so each value is
    read back under the name it was bound to.
    """
    parser = ArgumentParser('''Diffusers CLI tool''', usage='''diffusers-cli <command> [<args>]''')
    commands_parser = parser.add_subparsers(help='''diffusers-cli command helpers''')

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, '''func'''):
        # No sub-command selected: show usage and signal failure.
        parser.print_help()
        exit(1)

    # Run the selected command; `func` builds the command object from the args.
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
# ---- file-boundary marker (replaces stray non-Python residue) ----
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    """Config tester for MobileNetV2; checks model-specific config attributes exist.

    NOTE(review): class and base names restored from the caller
    (``MobileNetVaConfigTester(self, config_class=...)`` in the test class's
    setUp) and the ``ConfigTester`` import; the original body also read an
    unbound placeholder name instead of the config it had just built.
    """

    def create_and_test_config_common_properties(self):
        """Build a config from the tester's inputs and verify MobileNetV2-specific fields."""
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, '''tf_padding'''))
        self.parent.assertTrue(hasattr(config, '''depth_multiplier'''))
class MobileNetVaModelTester:
    """Builds small MobileNetV2 configs/inputs and checks output shapes for the test class below.

    NOTE(review): the original reused one placeholder name for every __init__
    parameter (a SyntaxError) and named every method ``_lowercase`` (so later
    definitions clobbered earlier ones); parameter names are restored from the
    attributes the body assigns and method names from the call sites in the
    accompanying test class (``prepare_config_and_inputs``,
    ``create_and_check_model``, ...). Class name restored from
    ``MobileNetVaModelTester(self)`` in that class's setUp.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1_280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        # Finegrained output keeps the full width; otherwise it scales with the depth multiplier.
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels, pixel_labels) for one forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        """Build a MobileNetV2 config from the tester's hyper-parameters."""
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        """Run the bare model and check hidden-state / pooled-output shapes."""
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.last_hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        """Run the classification head and check the logits shape."""
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        """Run the segmentation head; check the logits shape with and without labels."""
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common ModelTesterMixin machinery: return (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard model tests for MobileNetV2 (bare model, classification, segmentation).

    NOTE(review): the original bound every class attribute to one repeated
    placeholder name (only the last binding survived) and named every method
    ``_lowercase``; attribute and method names are restored to what the
    ``ModelTesterMixin``/``PipelineTesterMixin`` machinery and the unittest
    runner actually look up (``all_model_classes``, ``setUp``, ``test_*``).
    Mixin bases restored from the imports at the top of this file.
    """

    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetVaModel,
            "image-classification": MobileNetVaForImageClassification,
            "image-segmentation": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    # Capabilities MobileNetV2 does not support; the common mixin skips these suites.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='''MobileNetV2 does not output attentions''')
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            # MobileNetV2 exposes 16 hidden states (one per stage).
            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO sample image used by the integration tests below.

    NOTE(review): renamed from the placeholder ``a`` to ``prepare_img``, the
    name the integration tests in this file already call.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class UpperCAmelCase_(unittest.TestCase):
    """Slow integration tests running real MobileNetV2 checkpoints on a sample image.

    NOTE(review): the original named every method ``_lowercase`` (later defs
    clobbered earlier ones) while the body reads ``self.default_image_processor``;
    property and test names restored accordingly, and locals restored so each
    value is read back under the name it was bound to.
    """

    @cached_property
    def default_image_processor(self):
        """Image processor for the classification checkpoint (None without vision deps)."""
        return (
            MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''') if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2_445, -1.1_993, 0.1_905]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''')
        model = model.to(torch_device)

        image_processor = MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''')

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5_790, 17.7_581, 18.3_355], [18.3_257, 18.4_230, 18.8_973], [18.6_169, 18.8_650, 19.2_187]],
                [[-2.1_595, -2.0_977, -2.3_741], [-2.4_226, -2.3_028, -2.6_835], [-2.7_819, -2.5_991, -2.7_706]],
                [[4.2_058, 4.8_317, 4.7_638], [4.4_136, 5.0_361, 4.9_383], [4.5_028, 4.9_644, 4.8_734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
# ---- file-boundary marker (replaces stray non-Python residue) ----
def a(SCREAMING_SNAKE_CASE_: str) -> str:
    """Return the input string with ASCII lowercase letters converted to uppercase.

    Characters outside ``a``-``z`` are passed through unchanged.

    NOTE(review): the original called ``ord`` on the whole string (TypeError)
    and iterated an undefined name ``word``; both now use the actual argument.

    >>> a("hello world")
    'HELLO WORLD'
    >>> a("MiXeD 123!")
    'MIXED 123!'
    """
    # 'A' is 32 code points below 'a' in ASCII, hence the -32 shift.
    return "".join(chr(ord(char) - 32) if '''a''' <= char <= '''z''' else char for char in SCREAMING_SNAKE_CASE_)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
# ---- file-boundary marker (replaces stray non-Python residue) ----
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    """Minimal KwargsHandler used by the tests in this file to exercise ``to_kwargs``.

    NOTE(review): the original declared all three fields under one repeated
    placeholder name (so only the last survived) and the class under a
    placeholder; field names ``a``/``b``/``c`` and the class name are restored
    from the call sites below (``MockClass(a=2, c=2.25)`` ...).
    """

    a: int = 0
    b: bool = False
    c: float = 3.0
class UpperCAmelCase_(unittest.TestCase):
    """Tests for accelerate's kwargs handlers (KwargsHandler.to_kwargs, GradScalerKwargs, DDP kwargs).

    NOTE(review): method names restored to ``test_*`` so unittest discovers
    them (the original named all three ``_lowercase``), and locals restored so
    each value is read back under the name it was bound to. ``use_fpaa`` was
    replaced by the real ``use_fp16`` accessor.
    """

    def test_kwargs_handler(self):
        # `to_kwargs` returns only the fields that differ from their defaults.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {'''a''': 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {'''a''': 2, '''b''': True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {'''a''': 2, '''c''': 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # Non-default scaler settings supplied through the handler.
        scaler_handler = GradScalerKwargs(init_scale=1_024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='''fp16''', kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1_024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2_000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        # Re-launch this very file under torchrun; the __main__ block below does the checks.
        cmd = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    # Launched under torchrun by the test above: verify the DDP kwargs handler
    # is applied to the wrapped model.
    # NOTE(review): the original bound every value to one repeated placeholder
    # name and then read the real names (`ddp_scaler`, `model`, `error_msg`,
    # `observed_bucket_cap_map`), which raised NameError; consistent names restored.
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
# ---- file-boundary marker (replaces stray non-Python residue) ----
import math
def is_prime(number: int) -> bool:
    """Return True if *number* is prime (trial division by odd candidates up to sqrt).

    Raises:
        AssertionError: if *number* is not a non-negative int.

    NOTE(review): renamed from the placeholder ``a``; the sibling ``next_prime``
    below already calls it as ``is_prime``.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Return the first prime strictly after ``factor * value`` (strictly before it with ``desc=True``).

    NOTE(review): the original signature reused one placeholder name for all
    three parameters (a SyntaxError) and every local was bound to a placeholder
    while the body read the real names; names restored from those reads, and
    the function renamed to ``next_prime`` — the name its own recursive call
    already used.
    """
    value = factor * value
    first_value_val = value

    # Walk upward (or downward when desc=True) until a prime is reached.
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    # If the starting value was itself prime, search again from the next integer
    # so the result is strictly beyond the starting point.
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
# ---- file-boundary marker (replaces stray non-Python residue) ----
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure: submodule name -> list of public names it provides.
# NOTE(review): the original bound each branch's list to one repeated
# placeholder name and then passed the never-defined `_import_structure`
# to _LazyModule without installing it; the standard transformers
# lazy-init pattern is restored.
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ---- file-boundary marker (replaces stray non-Python residue) ----
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
class UpperCAmelCase_(ImageGPTImageProcessor):
    """Deprecated feature extractor for ImageGPT; a thin alias of ImageGPTImageProcessor.

    Emits a FutureWarning on construction and otherwise behaves exactly like
    the image processor.

    NOTE(review): the original signature reused one name for *args and **kwargs
    (a SyntaxError) and passed the args tuple as the warning category; base
    class restored from the ``ImageGPTImageProcessor`` import above, and
    ``FutureWarning`` used as the conventional deprecation category.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            '''The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use ImageGPTImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
# ---- file-boundary marker (replaces stray non-Python residue) ----
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
__UpperCAmelCase : Union[str, Any] = "CompVis/stable-diffusion-v1-1"
__UpperCAmelCase : int = "CompVis/stable-diffusion-v1-2"
__UpperCAmelCase : Union[str, Any] = "CompVis/stable-diffusion-v1-3"
__UpperCAmelCase : Tuple = "CompVis/stable-diffusion-v1-4"
class UpperCAmelCase_ ( _a):
    """Pipeline that appears to run one text-to-image request through four
    Stable Diffusion v1.x checkpoints (v1.1-v1.4) and return all four images.

    NOTE(review): this block is heavily machine-renamed and left byte-identical:
    several methods share the name ``_lowercase`` (later defs shadow earlier
    ones), ``__init__`` and the generation methods reuse one parameter name
    many times (a SyntaxError), and ``self.pipea`` / ``self.textaimg_sda_a`` /
    ``resa`` collapse what were presumably four distinct attributes, methods and
    results. Restore from the upstream community pipeline before use.
    """
    def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = True , ):
        """Load three checkpoint pipelines, assemble a fourth from explicit
        components, and register all four as modules.

        NOTE(review): judging by the StableDiffusionPipeline call below, the
        parameters were (vae, text_encoder, tokenizer, unet, scheduler,
        safety_checker, feature_extractor, requires_safety_checker=True), but
        all of them share one name here — TODO confirm against upstream.
        """
        # NOTE(review): `_init_` is presumably a typo for `__init__`; as written
        # it looks up a nonexistent attribute on the parent class.
        super()._init_()
        # Three pipelines loaded from pretrained checkpoints (the argument is
        # unrecoverable here — all placeholders collapsed to one name) ...
        UpperCamelCase : Tuple = StableDiffusionPipeline.from_pretrained(__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Tuple = StableDiffusionPipeline.from_pretrained(__SCREAMING_SNAKE_CASE )
        UpperCamelCase : List[Any] = StableDiffusionPipeline.from_pretrained(__SCREAMING_SNAKE_CASE )
        # ... and a fourth assembled from the components passed to __init__.
        UpperCamelCase : List[str] = StableDiffusionPipeline(
            vae=__SCREAMING_SNAKE_CASE , text_encoder=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , requires_safety_checker=__SCREAMING_SNAKE_CASE , )
        # NOTE(review): `self.pipea` is read here but never assigned above (the
        # loads bind a local), and all four keyword names collapsed to
        # `pipelinea` — verify against the upstream source.
        self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
    @property
    def _lowercase ( self ):
        """Return the pipeline's public config entries (name -> module).

        NOTE(review): the getattr argument is presumably ``k`` — TODO confirm.
        """
        return {k: getattr(self , __SCREAMING_SNAKE_CASE ) for k in self.config.keys() if not k.startswith('''_''' )}
    def _lowercase ( self , __SCREAMING_SNAKE_CASE = "auto" ):
        """Enable sliced attention on the UNet (explicit slice size or "auto").

        NOTE(review): the body reads an unbound ``slice_size`` — presumably the
        parameter's original name.
        """
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            UpperCamelCase : List[str] = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(__SCREAMING_SNAKE_CASE )
    def _lowercase ( self ):
        """Disable attention slicing.

        NOTE(review): passes an unbound name; upstream presumably passes ``None``.
        """
        self.enable_attention_slicing(__SCREAMING_SNAKE_CASE )
    @torch.no_grad()
    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 512 , __SCREAMING_SNAKE_CASE = 512 , __SCREAMING_SNAKE_CASE = 50 , __SCREAMING_SNAKE_CASE = 7.5 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "pil" , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 1 , **__SCREAMING_SNAKE_CASE , ):
        """Delegate one generation to a checkpoint pipeline with the standard SD kwargs."""
        return self.pipea(
            prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
    @torch.no_grad()
    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 512 , __SCREAMING_SNAKE_CASE = 512 , __SCREAMING_SNAKE_CASE = 50 , __SCREAMING_SNAKE_CASE = 7.5 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "pil" , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 1 , **__SCREAMING_SNAKE_CASE , ):
        """Delegate one generation to a checkpoint pipeline (presumably v1.2 upstream)."""
        return self.pipea(
            prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
    @torch.no_grad()
    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 512 , __SCREAMING_SNAKE_CASE = 512 , __SCREAMING_SNAKE_CASE = 50 , __SCREAMING_SNAKE_CASE = 7.5 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "pil" , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 1 , **__SCREAMING_SNAKE_CASE , ):
        """Delegate one generation to a checkpoint pipeline (presumably v1.3 upstream)."""
        return self.pipea(
            prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
    @torch.no_grad()
    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 512 , __SCREAMING_SNAKE_CASE = 512 , __SCREAMING_SNAKE_CASE = 50 , __SCREAMING_SNAKE_CASE = 7.5 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "pil" , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 1 , **__SCREAMING_SNAKE_CASE , ):
        """Delegate one generation to a checkpoint pipeline (presumably v1.4 upstream)."""
        return self.pipea(
            prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
    @torch.no_grad()
    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 512 , __SCREAMING_SNAKE_CASE = 512 , __SCREAMING_SNAKE_CASE = 50 , __SCREAMING_SNAKE_CASE = 7.5 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "pil" , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 1 , **__SCREAMING_SNAKE_CASE , ):
        """Run the same request through all four checkpoints and return the four results.

        NOTE(review): the divisibility check below reads unbound ``height``/
        ``width`` (the parameters' presumed original names), and all four
        sub-calls collapsed to ``self.textaimg_sda_a`` / result ``resa``.
        """
        UpperCamelCase : Tuple = '''cuda''' if torch.cuda.is_available() else '''cpu'''
        self.to(__SCREAMING_SNAKE_CASE )

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )

        # Get first result from Stable Diffusion Checkpoint v1.1
        UpperCamelCase : Optional[int] = self.textaimg_sda_a(
            prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )

        # Get first result from Stable Diffusion Checkpoint v1.2
        UpperCamelCase : Dict = self.textaimg_sda_a(
            prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )

        # Get first result from Stable Diffusion Checkpoint v1.3
        UpperCamelCase : Union[str, Any] = self.textaimg_sda_a(
            prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )

        # Get first result from Stable Diffusion Checkpoint v1.4
        UpperCamelCase : Optional[int] = self.textaimg_sda_a(
            prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
# ---- file-boundary marker (replaces stray non-Python residue) ----
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters used by the LayoutLM image-processing tests.

    NOTE(review): the original reused one placeholder name for every __init__
    parameter (a SyntaxError); parameter names are restored from the attributes
    the body assigns, and the class name from the test class below, which
    instantiates ``LayoutLMvaImageProcessingTester(self)``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        # Default target size when none is supplied.
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to build a LayoutLM image processor."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCAmelCase_ ( _a, unittest.TestCase):
    """Tests for ``LayoutLMvaImageProcessor``: config handling, resize output
    shapes for PIL/numpy/torch inputs, and a Tesseract OCR integration check.

    NOTE(review): this chunk is machine-mangled — every test method is named
    ``_lowercase`` (each later definition shadows the previous one), the mixin
    base ``_a`` is undefined here (presumably ``ImageProcessingSavingTestMixin``
    from the import above), and ``__SCREAMING_SNAKE_CASE`` placeholders stand
    in for the original local names / arguments throughout.
    """

    # Processor class under test; None (suite skipped via the decorators)
    # when pytesseract is unavailable.
    __UpperCamelCase : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def _lowercase ( self ):
        """setUp: build the tester/config holder.

        NOTE(review): assigns to the bare name ``UpperCamelCase`` instead of
        ``self.image_processor_tester`` (which the tests below read), and
        ``LayoutLMvaImageProcessingTester`` is not the name the tester class
        above ended up with — both artifacts of the mangling.
        """
        UpperCamelCase : List[Any] = LayoutLMvaImageProcessingTester(self )

    @property
    def _lowercase ( self ):
        """Image-processor constructor kwargs derived from the tester config."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def _lowercase ( self ):
        """The processor exposes the expected config attributes."""
        UpperCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''' ) )  # NOTE(review): __SCREAMING_SNAKE_CASE undefined — presumably the processor built above
        self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''' ) )
        self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''apply_ocr''' ) )

    def _lowercase ( self ):
        """``from_dict`` keeps the default size and honors a ``size`` override."""
        UpperCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )  # NOTE(review): `image_processor` undefined — mangled assignment above
        UpperCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )

    def _lowercase ( self ):
        """Intentionally empty placeholder."""
        pass

    def _lowercase ( self ):
        """PIL input: pixel_values shape for single and batched images; OCR
        words/boxes are present in the encoding."""
        UpperCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCamelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
        # Test not batched input
        UpperCamelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        self.assertIsInstance(encoding.words , __SCREAMING_SNAKE_CASE )
        self.assertIsInstance(encoding.boxes , __SCREAMING_SNAKE_CASE )
        # Test batched
        UpperCamelCase : int = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )

    def _lowercase ( self ):
        """numpy input: same shape checks as the PIL variant."""
        UpperCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCamelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
        # Test not batched input
        UpperCamelCase : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        UpperCamelCase : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )

    def _lowercase ( self ):
        """torch input: same shape checks as the PIL variant."""
        UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCamelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
        # Test not batched input
        UpperCamelCase : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        UpperCamelCase : Optional[int] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )

    def _lowercase ( self ):
        """Integration test: run OCR on a DocVQA fixture image and compare the
        extracted words and (unnormalized, pixel-coordinate) boxes against
        reference output; also check that ``apply_ocr=False`` still resizes."""
        UpperCamelCase : List[str] = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        UpperCamelCase : Dict = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
        UpperCamelCase : List[Any] = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
        UpperCamelCase : int = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        UpperCamelCase : Union[str, Any] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', 
        '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
        UpperCamelCase : str = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], 
        [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , __SCREAMING_SNAKE_CASE )
        self.assertListEqual(encoding.boxes , __SCREAMING_SNAKE_CASE )
        # with apply_OCR = False
        UpperCamelCase : Optional[Any] = LayoutLMvaImageProcessor(apply_ocr=__SCREAMING_SNAKE_CASE )
        UpperCamelCase : int = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 315
| 0
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_a)
class UpperCAmelCase_ ( _a):
    """Task template for language modeling over a single ``text`` column.

    NOTE(review): mangled — ``_a`` is undefined in this chunk (the decorator
    presumably read ``frozen=True`` and the base was ``TaskTemplate``), and
    all four class attributes were renamed to the same ``__UpperCamelCase``
    (originally along the lines of ``task`` / ``input_schema`` /
    ``label_schema`` / ``text_column``), so later ones shadow earlier ones and
    ``self.text_column`` read below is never defined.
    """

    # task identifier; kept in asdict() even when left at its default
    __UpperCamelCase : str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    # expected input features: one string column named "text"
    __UpperCamelCase : ClassVar[Features] = Features({"text": Value("string")})
    # language modeling has no separate label schema
    __UpperCamelCase : ClassVar[Features] = Features({})
    # name of the dataset column holding the text
    __UpperCamelCase : str = "text"

    @property
    def _lowercase ( self ):
        """Map the configured text column to the canonical ``text`` name."""
        return {self.text_column: "text"}
| 361
|
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def a ( SCREAMING_SNAKE_CASE_ : dict ):
    """Split a scikit-learn ``Bunch``-style mapping into ``(features, targets)``.

    Fix: the original body read a global name ``data`` that does not exist in
    this module (it would raise ``NameError`` on every call); it now reads the
    function's own parameter.

    :param SCREAMING_SNAKE_CASE_: mapping with ``"data"`` and ``"target"``
        keys, e.g. the Bunch returned by ``fetch_california_housing``.
    :return: tuple ``(features, targets)`` taken verbatim from the mapping.
    :raises KeyError: if either key is missing.
    """
    return (SCREAMING_SNAKE_CASE_["data"], SCREAMING_SNAKE_CASE_["target"])
def a ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : np.ndarray ):
    """Fit an XGBoost regressor and return its test-set predictions as a column vector.

    NOTE(review): broken as written by the mangling — all three parameters
    share the name ``SCREAMING_SNAKE_CASE_`` (duplicate parameter names are a
    SyntaxError); presumably the originals were ``(features, target,
    test_features)``. The first assignment also targets the bare name
    ``UpperCamelCase`` rather than ``xgb``, which the next line reads.
    """
    UpperCamelCase : Optional[Any] = XGBRegressor(verbosity=0 , random_state=4_2 )  # silent fit, fixed seed for reproducibility
    xgb.fit(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    # Predict target for test data
    UpperCamelCase : Any = xgb.predict(SCREAMING_SNAKE_CASE_ )
    UpperCamelCase : List[str] = predictions.reshape(len(SCREAMING_SNAKE_CASE_ ) , 1 )  # shape (n_samples, 1)
    return predictions
def a ( ):
    """End-to-end demo: fetch California housing, train XGBoost, print errors.

    NOTE(review): broken as written by the mangling —
    * ``UpperCamelCase , UpperCamelCase : Tuple = ...`` annotates a tuple
      unpacking target, which is a SyntaxError in Python;
    * it calls ``data_handling`` and ``xgboost``, which are not defined in
      this chunk (the helper functions above were all renamed to ``a``);
    * ``SCREAMING_SNAKE_CASE_`` placeholders stand in for the real locals.
    """
    UpperCamelCase : Tuple = fetch_california_housing()
    UpperCamelCase , UpperCamelCase : Tuple = data_handling(SCREAMING_SNAKE_CASE_ )
    # 75/25 train/test split with a fixed seed for reproducibility.
    UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[str] = train_test_split(
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , test_size=0.25 , random_state=1 )
    UpperCamelCase : Optional[Any] = xgboost(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    # Error printing
    print(F"""Mean Absolute Error : {mean_absolute_error(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}""" )
    print(F"""Mean Square Error : {mean_squared_error(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}""" )
if __name__ == "__main__":
    # Run the module's doctests first (verbose), then the demo entry point.
    import doctest

    doctest.testmod(verbose=True)
    # NOTE(review): ``main`` is not defined in this chunk — the entry function
    # above was renamed to ``a`` by the mangling, so this raises NameError.
    main()
| 315
| 0
|
from manim import *
class UpperCAmelCase_ ( _a):
    """Manim scene animating an input flowing through a 6-layer model whose
    weights are shuttled between CPU and GPU one layer at a time (the
    big-model-inference / offloading illustration).

    NOTE(review): this chunk is machine-mangled — the base ``_a`` is undefined
    (presumably ``Scene``), most assignments target the bare name
    ``UpperCamelCase`` instead of the locals read later (``mem``, ``fill``,
    ``cpu_left_col_base``, ``model_arr`` …), and ``__SCREAMING_SNAKE_CASE``
    stands in for the original arguments (directions like RIGHT/UP/DOWN,
    colors, mobjects). Indentation was reconstructed — TODO confirm against
    the upstream script.
    """

    def _lowercase ( self ):
        """Build the CPU/GPU/Model/Disk layout, then play the offloaded
        forward-pass animation."""
        # Building-block rectangles: memory cell, disk ("meta") cell, small fill square.
        UpperCamelCase : Optional[int] = Rectangle(height=0.5 , width=0.5 )
        UpperCamelCase : Optional[int] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        UpperCamelCase : Optional[int] = Rectangle(height=0.25 , width=0.25 )
        # CPU block: two 6-cell columns plus a label, left of center.
        UpperCamelCase : Any = [mem.copy() for i in range(6 )]
        UpperCamelCase : List[str] = [mem.copy() for i in range(6 )]
        UpperCamelCase : List[str] = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
        UpperCamelCase : int = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
        UpperCamelCase : List[Any] = VGroup(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
        UpperCamelCase : Dict = Text('''CPU''' , font_size=24 )
        UpperCamelCase : str = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__SCREAMING_SNAKE_CASE )
        # GPU block: 4 cells plus a label.
        UpperCamelCase : str = [mem.copy() for i in range(4 )]
        UpperCamelCase : Union[str, Any] = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
        UpperCamelCase : int = Text('''GPU''' , font_size=24 )
        UpperCamelCase : str = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
        gpu.move_to([-1, -1, 0] )
        self.add(__SCREAMING_SNAKE_CASE )
        # Model block: 6 cells plus a label, on the right.
        UpperCamelCase : Dict = [mem.copy() for i in range(6 )]
        UpperCamelCase : List[Any] = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
        UpperCamelCase : Dict = Text('''Model''' , font_size=24 )
        UpperCamelCase : Any = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
        model.move_to([3, -1.0, 0] )
        self.add(__SCREAMING_SNAKE_CASE )
        # Fill squares tracking where each of the 6 layers currently lives
        # (one copy over the model, one parked on the CPU column).
        UpperCamelCase : Optional[Any] = []
        UpperCamelCase : List[str] = []
        for i, rect in enumerate(__SCREAMING_SNAKE_CASE ):
            UpperCamelCase : str = fill.copy().set_fill(__SCREAMING_SNAKE_CASE , opacity=0.8 )
            target.move_to(__SCREAMING_SNAKE_CASE )
            model_arr.append(__SCREAMING_SNAKE_CASE )
            UpperCamelCase : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__SCREAMING_SNAKE_CASE , opacity=0.8 )
            cpu_target.move_to(cpu_left_col_base[i] )
            model_cpu_arr.append(__SCREAMING_SNAKE_CASE )
        self.add(*__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE )
        # Disk block: two 6-cell "meta" columns plus a label, far left.
        UpperCamelCase : List[str] = [meta_mem.copy() for i in range(6 )]
        UpperCamelCase : List[str] = [meta_mem.copy() for i in range(6 )]
        UpperCamelCase : Any = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
        UpperCamelCase : Optional[int] = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
        UpperCamelCase : Dict = VGroup(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
        UpperCamelCase : str = Text('''Disk''' , font_size=24 )
        UpperCamelCase : Dict = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
        disk.move_to([-4, -1.25, 0] )
        self.add(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        # Legend box in the top-left corner.
        UpperCamelCase : str = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        UpperCamelCase : List[Any] = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        UpperCamelCase : List[str] = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
        blue_text.next_to(__SCREAMING_SNAKE_CASE , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(__SCREAMING_SNAKE_CASE )
        # Narration, step 1.
        UpperCamelCase : str = MarkupText(
            f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__SCREAMING_SNAKE_CASE ) )
        # The input token: a small square that travels along the model.
        UpperCamelCase : Optional[Any] = Square(0.3 )
        input.set_fill(__SCREAMING_SNAKE_CASE , opacity=1.0 )
        input.set_stroke(width=0.0 )
        input.next_to(model_base[0] , __SCREAMING_SNAKE_CASE , buff=0.5 )
        self.play(Write(__SCREAMING_SNAKE_CASE ) )
        input.generate_target()
        input.target.next_to(model_arr[0] , direction=__SCREAMING_SNAKE_CASE , buff=0.02 )
        self.play(MoveToTarget(__SCREAMING_SNAKE_CASE ) )
        self.play(FadeOut(__SCREAMING_SNAKE_CASE ) )
        # Arrow marking the layer currently being executed.
        UpperCamelCase : List[Any] = Arrow(start=__SCREAMING_SNAKE_CASE , end=__SCREAMING_SNAKE_CASE , color=__SCREAMING_SNAKE_CASE , buff=0.5 )
        a.next_to(model_arr[0].get_left() , __SCREAMING_SNAKE_CASE , buff=0.2 )
        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0] )
        # Narration, step 2.
        UpperCamelCase : int = MarkupText(
            f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__SCREAMING_SNAKE_CASE , run_time=3 ) )
        # Shared kwargs for the Circumscribe highlight animations.
        UpperCamelCase : Union[str, Any] = {'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.02}
        self.play(
            Write(__SCREAMING_SNAKE_CASE ) , Circumscribe(model_arr[0] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , Circumscribe(model_cpu_arr[0] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , Circumscribe(gpu_rect[0] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , )
        self.play(MoveToTarget(model_cpu_arr[0] ) )
        UpperCamelCase : Union[str, Any] = a.copy()
        # Walk the input across the 6 layers; before each layer its weights
        # move CPU->GPU, afterwards they return to the CPU column.
        for i in range(6 ):
            a_c.next_to(model_arr[i].get_right() + 0.02 , __SCREAMING_SNAKE_CASE , buff=0.2 )
            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02 )
            UpperCamelCase : int = AnimationGroup(
                FadeOut(__SCREAMING_SNAKE_CASE , run_time=0.5 ) , MoveToTarget(__SCREAMING_SNAKE_CASE , run_time=0.5 ) , FadeIn(__SCREAMING_SNAKE_CASE , run_time=0.5 ) , lag_ratio=0.2 )
            self.play(__SCREAMING_SNAKE_CASE )
            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
            if i < 5:
                # Prefetch the next layer's weights onto the GPU.
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
                if i >= 1:
                    # Speed up highlights after the first couple of layers.
                    UpperCamelCase : Tuple = 0.7
                self.play(
                    Circumscribe(model_arr[i] , **__SCREAMING_SNAKE_CASE ) , Circumscribe(cpu_left_col_base[i] , **__SCREAMING_SNAKE_CASE ) , Circumscribe(cpu_left_col_base[i + 1] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , Circumscribe(gpu_rect[0] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , Circumscribe(model_arr[i + 1] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
            else:
                # Last layer: weights return to CPU, input exits on the right.
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
                self.play(
                    Circumscribe(model_arr[-1] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , Circumscribe(cpu_left_col_base[-1] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , Circumscribe(gpu_rect[0] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , )
                self.play(MoveToTarget(model_cpu_arr[i] ) )
        # Wrap up: fade the arrow, park the output, final narration.
        UpperCamelCase : str = a_c
        UpperCamelCase : Optional[int] = a_c.copy()
        input.generate_target()
        input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
        self.play(
            FadeOut(__SCREAMING_SNAKE_CASE ) , FadeOut(__SCREAMING_SNAKE_CASE , run_time=0.5 ) , )
        UpperCamelCase : List[str] = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__SCREAMING_SNAKE_CASE , run_time=3 ) , MoveToTarget(__SCREAMING_SNAKE_CASE ) )
        self.wait()
| 362
|
# Code cell prepended to every auto-generated documentation notebook:
# installs the required packages (with a commented-out from-source variant).
__UpperCAmelCase : str = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
# Notebook prologue cells in {"type", "content"} shape.
# NOTE(review): ``INSTALL_CONTENT`` is undefined here — the three constants in
# this block were all mangled to the same name ``__UpperCAmelCase``, so the
# first assignment (presumably ``INSTALL_CONTENT``) is shadowed.
__UpperCAmelCase : Dict = [{"type": "code", "content": INSTALL_CONTENT}]
# Placeholder -> dummy-class substitutions applied when rendering doc templates.
__UpperCAmelCase : Union[str, Any] = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 315
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.