| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (86–54.5k chars) | int64 (0–371) | string (87–49.2k chars) | int64 (0–349) | int64 (0–1) |
from typing import List, Union

import numpy as np

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """Depth estimation pipeline: predicts a depth map for an input image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # No extra parameters: empty kwargs for preprocess, forward and postprocess.
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        # Upsample the raw prediction back to the original (height, width) of the input image.
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
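
# A minimal usage sketch (comments only; "Intel/dpt-large" and the image URL are
# illustrative choices, and the pipeline factory resolves this class for the
# "depth-estimation" task):
#
#     from transformers import pipeline
#
#     depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#     outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#     outputs["depth"]            # PIL.Image.Image holding the rescaled depth map
#     outputs["predicted_depth"]  # raw torch.Tensor returned by the model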
import argparse
import os
import pickle
import sys

import torch

from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()

# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils


def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save the PyTorch model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--tf_checkpoint_path",
        default="",
        type=str,
        help="An optional path to a TensorFlow checkpoint path to be converted.",
    )
    parser.add_argument(
        "--transfo_xl_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained Transformer-XL model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--transfo_xl_dataset_file",
        default="",
        type=str,
        help="An optional dataset file to be converted in a vocabulary.",
    )
    args = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
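
# A minimal sketch of calling the converter directly from Python instead of the CLI
# (hypothetical paths):
#
#     convert_transfo_xl_checkpoint_to_pytorch(
#         tf_checkpoint_path="",                          # skip the TF model conversion
#         transfo_xl_config_file="",
#         pytorch_dump_folder_path="./transfo-xl",
#         transfo_xl_dataset_file="./corpus-cached.pkl",  # convert the corpus pickle only
#     )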
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
import argparse
import gc
import json
import os
import re

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint


NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}


def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
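
# A minimal sketch of the renaming above on a toy state dict (illustrative keys):
#
#     sd = {"emb.weight": w0, "blocks.0.att.time_mix_k": w1, "head.weight": w2}
#     sorted(convert_state_dict(sd))
#     # -> ["head.weight", "rwkv.blocks.0.attention.time_mix_key", "rwkv.embeddings.weight"]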
def convert_rwkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        index_path = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(index_path, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict).
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )

    args = parser.parse_args()
    convert_rwkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        # Zero-shot NLI trick: pair the text with one "This example is {label}" hypothesis per label.
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
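
# A minimal usage sketch (illustrative text and labels; in practice the tool is usually
# invoked through the agents framework, which runs setup/encode/forward/decode for you):
#
#     tool = TextClassificationTool()
#     tool("This is a super nice API!", labels=["positive", "negative"])  # -> "positive"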
def encrypt(input_string: str, key: int) -> str:
    # Rail fence cipher: writes the characters into `key` rows following a zigzag
    # pattern, then reads the rows left to right.
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    # Rebuilds the zigzag template, slices the ciphertext into rows of the right
    # lengths, then reads the grid back in zigzag order.
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    # Returns a candidate plaintext for every possible key.
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
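    # A minimal round-trip sketch (illustrative plaintext and key; follows directly
    # from the encrypt/decrypt definitions above):
    ciphertext = encrypt("HELLO WORLD", 4)
    assert decrypt(ciphertext, 4) == "HELLO WORLD"
    assert bruteforce(ciphertext)[4] == "HELLO WORLD"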
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'_float_tensor',
'decoder.output_projection.weight',
]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = emb.weight.shape
lowerCamelCase__ : int = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = emb.weight.data
return lin_layer
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase="facebook/mbart-large-en-ro" , _lowerCamelCase=False , _lowerCamelCase=False ):
lowerCamelCase__ : Union[str, Any] = torch.load(_lowerCamelCase , map_location='cpu' )['model']
remove_ignore_keys_(_lowerCamelCase )
lowerCamelCase__ : List[Any] = state_dict['encoder.embed_tokens.weight'].shape[0]
lowerCamelCase__ : Optional[int] = MBartConfig.from_pretrained(_lowerCamelCase , vocab_size=_lowerCamelCase )
if mbart_aa and finetuned:
lowerCamelCase__ : Optional[int] = 'relu'
lowerCamelCase__ : List[Any] = state_dict['decoder.embed_tokens.weight']
lowerCamelCase__ : Union[str, Any] = MBartForConditionalGeneration(_lowerCamelCase )
model.model.load_state_dict(_lowerCamelCase )
if finetuned:
lowerCamelCase__ : List[str] = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
A_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
A_ : Optional[int] = parser.parse_args()
A_ : List[Any] = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : list[list[int]] = []
lowerCamelCase__ : list[int] = []
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : List[Any] = sum(_lowerCamelCase )
create_state_space_tree(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return result
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
if sum(_lowerCamelCase ) > max_sum or (remaining_nums_sum + sum(_lowerCamelCase )) < max_sum:
return
if sum(_lowerCamelCase ) == max_sum:
result.append(_lowerCamelCase )
return
for index in range(_lowerCamelCase , len(_lowerCamelCase ) ):
create_state_space_tree(
_lowerCamelCase , _lowerCamelCase , index + 1 , [*path, nums[index]] , _lowerCamelCase , remaining_nums_sum - nums[index] , )
A_ : Optional[Any] = [3, 34, 4, 12, 5, 2]
A_ : List[str] = 9
A_ : List[Any] = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
A_ : Optional[int] = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
A_ : Dict = parser.parse_args()
A_ : Dict = "cpu"
A_ : List[str] = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
A_ : int = "path-to-your-trained-model"
A_ : Tuple = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
A_ : Tuple = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
A_ : Optional[Any] = pipe.to(device)
# to channels last
A_ : Union[str, Any] = pipe.unet.to(memory_format=torch.channels_last)
A_ : Any = pipe.vae.to(memory_format=torch.channels_last)
A_ : List[Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
A_ : int = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
A_ : Optional[Any] = torch.randn(2, 4, 64, 64)
A_ : Optional[int] = torch.rand(1) * 999
A_ : Optional[Any] = torch.randn(2, 77, 768)
A_ : Tuple = (sample, timestep, encoder_hidden_status)
try:
A_ : Optional[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
A_ : Optional[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
A_ : List[str] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
A_ : Tuple = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
A_ : List[str] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
A_ : Any = 666
A_ : Optional[Any] = torch.Generator(device).manual_seed(seed)
A_ : Optional[int] = {"generator": generator}
if args.steps is not None:
A_ : Optional[int] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
A_ : Union[str, Any] = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def A ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
with open(snake_case__ ) as metadata_file:
SCREAMING_SNAKE_CASE__ = json.load(snake_case__ )
SCREAMING_SNAKE_CASE__ = LukeConfig(use_entity_aware_attention=snake_case__ , **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
SCREAMING_SNAKE_CASE__ = torch.load(snake_case__ , map_location="""cpu""" )
# Load the entity vocab file
SCREAMING_SNAKE_CASE__ = load_entity_vocab(snake_case__ )
SCREAMING_SNAKE_CASE__ = RobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
SCREAMING_SNAKE_CASE__ = AddedToken("""<ent>""" , lstrip=snake_case__ , rstrip=snake_case__ )
SCREAMING_SNAKE_CASE__ = AddedToken("""<ent2>""" , lstrip=snake_case__ , rstrip=snake_case__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(snake_case__ )
with open(os.path.join(snake_case__ , LukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
json.dump(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE__ = LukeTokenizer.from_pretrained(snake_case__ )
# Initialize the embeddings of the special tokens
SCREAMING_SNAKE_CASE__ = state_dict["""embeddings.word_embeddings.weight"""]
SCREAMING_SNAKE_CASE__ = word_emb[tokenizer.convert_tokens_to_ids(["""@"""] )[0]].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ = word_emb[tokenizer.convert_tokens_to_ids(["""#"""] )[0]].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
SCREAMING_SNAKE_CASE__ = f"""encoder.layer.{layer_index}.attention.self."""
SCREAMING_SNAKE_CASE__ = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE__ = state_dict["""entity_embeddings.entity_embeddings.weight"""]
SCREAMING_SNAKE_CASE__ = entity_emb[entity_vocab["""[MASK]"""]]
SCREAMING_SNAKE_CASE__ = LukeModel(config=snake_case__ ).eval()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model.load_state_dict(snake_case__ , strict=snake_case__ )
if not (len(snake_case__ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f"""Missing keys {", ".join(snake_case__ )}. Expected only missing embeddings.position_ids""" )
if not (all(key.startswith("""entity_predictions""" ) or key.startswith("""lm_head""" ) for key in unexpected_keys )):
raise ValueError(
"""Unexpected keys"""
f""" {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}""" )
# Check outputs
SCREAMING_SNAKE_CASE__ = LukeTokenizer.from_pretrained(snake_case__ , task="""entity_classification""" )
SCREAMING_SNAKE_CASE__ = (
"""Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"""
""" new world number one avoid a humiliating second- round exit at Wimbledon ."""
)
SCREAMING_SNAKE_CASE__ = (39, 42)
SCREAMING_SNAKE_CASE__ = tokenizer(snake_case__ , entity_spans=[span] , add_prefix_space=snake_case__ , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE__ = model(**snake_case__ )
# Verify word hidden states
if model_size == "large":
SCREAMING_SNAKE_CASE__ = torch.Size((1, 42, 10_24) )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[0.01_33, 0.08_65, 0.00_95], [0.30_93, -0.25_76, -0.74_18], [-0.17_20, -0.21_17, -0.28_69]] )
else: # base
SCREAMING_SNAKE_CASE__ = torch.Size((1, 42, 7_68) )
SCREAMING_SNAKE_CASE__ = torch.tensor([[0.00_37, 0.13_68, -0.00_91], [0.10_99, 0.33_29, -0.10_95], [0.07_65, 0.53_35, 0.11_79]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case__ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
SCREAMING_SNAKE_CASE__ = torch.Size((1, 1, 10_24) )
SCREAMING_SNAKE_CASE__ = torch.tensor([[0.04_66, -0.01_06, -0.01_79]] )
else: # base
SCREAMING_SNAKE_CASE__ = torch.Size((1, 1, 7_68) )
SCREAMING_SNAKE_CASE__ = torch.tensor([[0.14_57, 0.10_44, 0.01_74]] )
if not (outputs.entity_last_hidden_state.shape != expected_shape):
raise ValueError(
f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
f""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , snake_case__ , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(snake_case__ ) )
model.save_pretrained(snake_case__ )
def A ( snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = {}
with open(snake_case__ , """r""" , encoding="""utf-8""" ) as f:
for index, line in enumerate(snake_case__ ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = line.rstrip().split("""\t""" )
SCREAMING_SNAKE_CASE__ = index
return entity_vocab
if __name__ == "__main__":
A_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
A_ : Optional[Any] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
import json
import os
import unittest

from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], token_type_ids)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
                "input_ids": [
                    [1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
                ],
                "token_type_ids": [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                ],
                "attention_mask": [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
                ]
            }
            # fmt: on

            expected_decoded_sequence = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
from __future__ import annotations


def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    # One extra relaxation pass: if any edge can still be relaxed, a negative cycle exists.
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    # Relax all edges vertex_count - 1 times.
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
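
# A minimal sketch of the expected input shape (illustrative three-vertex graph; each
# edge is a dict with "src", "dst" and "weight" keys, matching the driver below):
#
#     example_graph = [
#         {"src": 0, "dst": 1, "weight": 4},
#         {"src": 0, "dst": 2, "weight": 1},
#         {"src": 2, "dst": 1, "weight": 2},
#     ]
#     bellman_ford(example_graph, vertex_count=3, edge_count=3, src=0)  # [0.0, 3.0, 1.0]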
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowercase = int(input('''Enter number of vertices: ''').strip())
__lowercase = int(input('''Enter number of edges: ''').strip())
__lowercase = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
__lowercase , __lowercase , __lowercase = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
__lowercase = {'''src''': src, '''dst''': dest, '''weight''': weight}
__lowercase = int(input('''\nEnter shortest path source:''').strip())
__lowercase = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
from __future__ import annotations


def two_pointer(nums: list[int], target: int) -> list[int]:
    # Classic two-pointer scan for the two-sum problem: `nums` must be sorted in
    # ascending order for the pointer moves below to be correct.
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union

import tqdm
from filelock import FileLock

from transformers import (
    BartTokenizer,
    BartTokenizerFast,
    DataProcessor,
    PreTrainedTokenizer,
    RobertaTokenizer,
    RobertaTokenizerFast,
    XLMRobertaTokenizer,
    is_tf_available,
    is_torch_available,
)


logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None


if is_torch_available():
    import torch
    from torch.utils.data import Dataset

    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list


if is_tf_available():
    import tensorflow as tf

    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list


class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples


def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
) -> List[InputFeatures]:
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features


hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
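
# A minimal usage sketch (hedged: assumes the HANS text files are present under
# `data_dir`; the tokenizer checkpoint is an illustrative choice):
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     dataset = HansDataset(data_dir="./hans", tokenizer=tokenizer, task="hans", max_seq_length=128)
#     dataset[0].input_ids  # token ids of the first premise/hypothesis pair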
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class DownloadConfig:
    """Configuration for the file download and caching helpers."""

    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
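
# A minimal usage sketch (illustrative values; `copy` deep-copies every field, so the
# copies can be mutated independently):
#
#     base = DownloadConfig(max_retries=3)
#     forced = base.copy()
#     forced.force_download = True
#     assert base.force_download is False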
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel

from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)

        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }

        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # Clean up the GPU memory after each test.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
"""simple docstring"""
import pprint
import requests
_UpperCAmelCase = """https://zenquotes.io/api"""
def __magic_name__ ( ):
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def __magic_name__ ( ):
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
_UpperCAmelCase = random_quotes()
pprint.pprint(response)
"""simple docstring"""
def __magic_name__ ( lowercase = 200_0000 ):
SCREAMING_SNAKE_CASE_: str =[0 for i in range(n + 1 )]
SCREAMING_SNAKE_CASE_: Any =1
SCREAMING_SNAKE_CASE_: Tuple =1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , lowercase ):
SCREAMING_SNAKE_CASE_: List[str] =1
SCREAMING_SNAKE_CASE_: Optional[int] =0
for i in range(lowercase ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
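
# For example, solution(1000) returns 76127, the sum of all primes below 1000
# (an illustrative check; the default n = 2000000 matches Project Euler problem 10).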
if __name__ == "__main__":
print(f"""{solution() = }""")
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A : str = logging.get_logger(__name__)
__A : str = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = 'detr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
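# Usage sketch for the config above (requires the full `transformers`
# package at runtime; the values are illustrative):
#   config = DetrConfig(num_queries=50, d_model=128)
#   restored = DetrConfig.from_dict(config.to_dict())
#   assert restored.num_queries == 50 and restored.hidden_size == 128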
| 367
|
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 8
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_graphormer'] = [
        'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GraphormerForGraphClassification',
        'GraphormerModel',
        'GraphormerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
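# The lazy-module pattern above defers the heavy torch imports until first
# attribute access. A simplified stand-in for what _LazyModule does (for
# illustration only, not the real implementation):
#
#   class _SketchLazyModule(types.ModuleType):
#       def __getattr__(self, attr):
#           for module_name, symbols in self._import_structure.items():
#               if attr in symbols:
#                   module = importlib.import_module(f".{module_name}", self.__name__)
#                   return getattr(module, attr)
#           raise AttributeError(attr)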
| 294
|
"""simple docstring"""
from __future__ import annotations

from collections.abc import Iterable
from typing import Generic, TypeVar
_snake_case = TypeVar('_T')
class QueueByTwoStacks(Generic[_T]):
    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        # Pour stack1 into stack2 only when stack2 is empty; the oldest item
        # then sits on top of stack2, giving amortized O(1) dequeues.
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError("Queue is empty")
        return self._stack2.pop()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
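# Usage sketch: FIFO behavior via the two amortized stacks above.
def _demo_queue() -> None:
    q = QueueByTwoStacks([1, 2, 3])
    q.put(4)
    assert q.get() == 1
    assert q.get() == 2
    assert len(q) == 2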
| 294
| 1
|
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1
    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"

    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
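# Input format sketch for --correct_filename: one semicolon-separated record
# per line, matching the split in main() above (values are illustrative):
#
#   tests/models/foo/test_modeling_foo.py;FooModelTest;test_inference;expected_slice = torch.tensor([...])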
| 339
|
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name)
            )
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name)
            )
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2]
                )
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath


@deprecated(None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
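# Usage sketch (downloads the four MNIST archives on first run; the target
# directory below is illustrative):
#   data = read_data_sets("/tmp/mnist_data", one_hot=True)
#   batch_xs, batch_ys = data.train.next_batch(100)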
| 339
| 1
|
import warnings
warnings.warn(
'memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: '
'`from accelerate import find_executable_batch_size` to avoid this warning.',
FutureWarning,
)
| 205
|
'''simple docstring'''
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(F"{metric_key_prefix}_"):
                    metrics[F"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(F"{metric_key_prefix}_"):
                metrics[F"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
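# Contract sketch for the `post_process_function` hook assumed above: it maps
# (examples, features, raw predictions[, stage]) to an EvalPrediction-like
# object whose `predictions` and `label_ids` feed `compute_metrics`. The exact
# signature is an assumption inferred from the call sites in this class.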
| 237
| 0
|
from math import factorial, radians
def maclaurin_sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin via its Maclaurin series, after reducing the angle mod 360."""
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)


if __name__ == "__main__":
    __import__("doctest").testmod()
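# Worked check: sin(30 degrees) = 0.5, and the series above reproduces it to
# the requested 10 decimal places.
assert abs(maclaurin_sin(30) - 0.5) < 1e-9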
| 29
|
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted
            )
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted
            )
            decrypted += decrypted_batch
        return decrypted


def main() -> None:
    n = int(input("""Enter the order of the encryption key: """))
    hill_matrix = []

    print("""Enter each row of the encryption key with space separated integers""")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("""Would you like to encrypt or decrypt some text? (1 or 2)""")
    option = input("""\n1. Encrypt\n2. Decrypt\n""")
    if option == "1":
        text_e = input("""What text would you like to encrypt?: """)
        print("""Your encrypted text is:""")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("""What text would you like to decrypt?: """)
        print("""Your decrypted text is:""")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
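# Worked sketch: the 2x2 key [[2, 5], [1, 6]] has determinant 7, which is
# coprime with 36, so it is a valid encryption key and round-trips:
#   hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
#   assert hc.decrypt(hc.encrypt("testing hill cipher")).startswith("TESTINGHILLCIPHER")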
| 29
| 1
|
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split('''metadata''')
        curr_real_layer_name = ''''''.join(split_layer[0])[:-1]
        split_layer = [tuple(('''metadata''' + split_layer[1]).split('''/'''))]
    elif "kvstore" in layer:
        split_layer = layer.split('''kvstore''')
        curr_real_layer_name = ''''''.join(split_layer[0])[:-1]
        split_layer = [tuple(('''kvstore''' + split_layer[1]).split('''/'''))]
    else:
        split_layer = layer.split('''/''')
        curr_real_layer_name = '''/'''.join(split_layer[:-1])
        split_layer[-1] = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
    elif "kvstore/driver" in layer:
        content = '''file'''
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + '''/checkpoint''', '''rb''') as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())['''optimizer''']['''target''']
    checkpoint_info = flatten_dict(checkpoint_info, sep='''/''')

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split('''/''')), raw_weights)
        key = '''/'''.join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace('''.bin''', f"""-{len(sharded_state_dicts)+1:05d}-of-???.bin""")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace('''.bin''', f"""-{len(sharded_state_dicts)+1:05d}-of-???.bin"""))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            '''.bin''', f"""-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"""
        )
        temp_filename = os.path.join(dump_path, weights_name.replace('''.bin''', f"""-{idx+1:05d}-of-???.bin"""))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {'''total_size''': total_size}
    index = {'''metadata''': metadata, '''weight_map''': weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), '''w''', encoding='''utf-8''') as f:
        content = json.dumps(index, indent=2, sort_keys=True) + '''\n'''
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
    parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''')
    config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''')
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        '''/home/arthur_huggingface_co/transformers/switch_converted''', device_map='''auto''')

    tokenizer = T5Tokenizer.from_pretrained('''t5-small''')
    text = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''

    input_ids = tokenizer(text, return_tensors='''pt''').input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
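# The index written by shard_on_the_fly follows the standard sharded-checkpoint
# layout used by `transformers`, roughly:
#   {"metadata": {"total_size": ...},
#    "weight_map": {"<param name>": "pytorch_model-00001-of-000NN.bin", ...}}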
| 67
|
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __UpperCamelCase ( _A , _A ):
assert isinstance(_A , _A )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __UpperCamelCase ( _A , _A , _A ):
lowerCAmelCase_ = tmp_path / '''cache'''
lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase_ = JsonDatasetReader(_A , cache_dir=_A , keep_in_memory=_A ).read()
_check_json_dataset(_A , _A )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def __UpperCamelCase ( _A , _A , _A ):
lowerCAmelCase_ = tmp_path / '''cache'''
lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase_ = features.copy() if features else default_expected_features
lowerCAmelCase_ = (
Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase_ = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read()
_check_json_dataset(_A , _A )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def __UpperCamelCase ( _A , _A , _A ):
lowerCAmelCase_ = tmp_path / '''cache'''
lowerCAmelCase_ = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
lowerCAmelCase_ = features.copy() if features else default_expected_features
lowerCAmelCase_ = (
Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase_ = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read()
assert isinstance(_A , _A )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def __UpperCamelCase ( _A , _A ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
lowerCAmelCase_ = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
lowerCAmelCase_ = features.copy()
lowerCAmelCase_ = (
Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase_ = tmp_path / '''cache'''
lowerCAmelCase_ = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read()
assert isinstance(_A , _A )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __UpperCamelCase ( _A , _A , _A ):
lowerCAmelCase_ = tmp_path / '''cache'''
lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase_ = JsonDatasetReader(_A , cache_dir=_A , split=_A ).read()
_check_json_dataset(_A , _A )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def __UpperCamelCase ( _A , _A , _A ):
if issubclass(_A , _A ):
lowerCAmelCase_ = jsonl_path
elif issubclass(_A , _A ):
lowerCAmelCase_ = [jsonl_path]
lowerCAmelCase_ = tmp_path / '''cache'''
lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase_ = JsonDatasetReader(_A , cache_dir=_A ).read()
_check_json_dataset(_A , _A )
def __UpperCamelCase ( _A , _A , _A=("train",) ):
assert isinstance(_A , _A )
for split in splits:
lowerCAmelCase_ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __UpperCamelCase ( _A , _A , _A ):
lowerCAmelCase_ = tmp_path / '''cache'''
lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase_ = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=_A , keep_in_memory=_A ).read()
_check_json_datasetdict(_A , _A )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def __UpperCamelCase ( _A , _A , _A ):
lowerCAmelCase_ = tmp_path / '''cache'''
lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase_ = features.copy() if features else default_expected_features
lowerCAmelCase_ = (
Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase_ = JsonDatasetReader({'''train''': jsonl_path} , features=_A , cache_dir=_A ).read()
_check_json_datasetdict(_A , _A )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __UpperCamelCase ( _A , _A , _A ):
if split:
lowerCAmelCase_ = {split: jsonl_path}
else:
lowerCAmelCase_ = '''train'''
lowerCAmelCase_ = {'''train''': jsonl_path, '''test''': jsonl_path}
lowerCAmelCase_ = tmp_path / '''cache'''
lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase_ = JsonDatasetReader(_A , cache_dir=_A ).read()
_check_json_datasetdict(_A , _A , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __UpperCamelCase ( _A ):
return json.load(_A )
def __UpperCamelCase ( _A ):
return [json.loads(_A ) for line in buffer]
class A :
@pytest.mark.parametrize('''lines, load_json_function''', [(True, load_json_lines), (False, load_json)] )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, lines=UpperCamelCase__ ).write()
buffer.seek(0 )
lowerCAmelCase_ = load_json_function(UpperCamelCase__ )
assert isinstance(UpperCamelCase__, UpperCamelCase__ )
assert isinstance(exported_content[0], UpperCamelCase__ )
assert len(UpperCamelCase__ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''', [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
], )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, lines=UpperCamelCase__, orient=UpperCamelCase__ ).write()
buffer.seek(0 )
lowerCAmelCase_ = load_json(UpperCamelCase__ )
assert isinstance(UpperCamelCase__, UpperCamelCase__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase__, '''keys''' ) and not hasattr(exported_content[0], '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase__ ) == 10
@pytest.mark.parametrize('''lines, load_json_function''', [(True, load_json_lines), (False, load_json)] )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, lines=UpperCamelCase__, num_proc=2 ).write()
buffer.seek(0 )
lowerCAmelCase_ = load_json_function(UpperCamelCase__ )
assert isinstance(UpperCamelCase__, UpperCamelCase__ )
assert isinstance(exported_content[0], UpperCamelCase__ )
assert len(UpperCamelCase__ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''', [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
], )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, lines=UpperCamelCase__, orient=UpperCamelCase__, num_proc=2 ).write()
buffer.seek(0 )
lowerCAmelCase_ = load_json(UpperCamelCase__ )
assert isinstance(UpperCamelCase__, UpperCamelCase__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase__, '''keys''' ) and not hasattr(exported_content[0], '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase__ ) == 10
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
with pytest.raises(UpperCamelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, num_proc=0 )
@pytest.mark.parametrize('''compression, extension''', [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = tmp_path_factory.mktemp('''data''' ) / f"test.json.{extension}"
lowerCAmelCase_ = str(shared_datadir / f"test_file.json.{extension}" )
JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, compression=UpperCamelCase__ ).write()
with fsspec.open(UpperCamelCase__, '''rb''', compression='''infer''' ) as f:
lowerCAmelCase_ = f.read()
with fsspec.open(UpperCamelCase__, '''rb''', compression='''infer''' ) as f:
lowerCAmelCase_ = f.read()
assert exported_content == original_content
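# End-to-end round-trip sketch of the reader/writer under test (paths are
# illustrative):
#   from datasets import Dataset
#   ds = Dataset.from_dict({"col_1": ["a"], "col_2": [1], "col_3": [1.0]})
#   JsonDatasetWriter(ds, "out.jsonl", lines=True).write()
#   reloaded = JsonDatasetReader("out.jsonl").read()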
| 278
| 0
|
import socket
def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b'''Hello server!''')

    with open('''Received_file''', '''wb''') as out_file:
        print('''File opened''')
        print('''Receiving data...''')
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print('''Successfully received the file''')
    sock.close()
    print('''Connection closed''')


if __name__ == "__main__":
    main()
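# Companion server sketch (illustrative): binds the same port as the client
# above and streams a local file back to it.
def serve_file(filename: str = "mytext.txt", port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _addr = server.accept()
    print(conn.recv(1024))  # the client's greeting
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()  # EOF ends the client's receive loop
    server.close()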
| 371
|
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    '''
    Undirected Unweighted Graph for running a Markov Chain
    '''

    def __init__(self):
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
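# Usage sketch: a tiny two-state chain; with these probabilities, state "a"
# should be visited roughly five times as often as "b" over many steps.
def _demo_markov() -> None:
    transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
    visited = get_transitions("a", transitions, 1000)
    assert visited["a"] > visited["b"]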
| 35
| 0
|
'''simple docstring'''
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)
print(*a__ )
if __name__ == "__main__":
    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 90 counterclockwise:\n')
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 180:\n')
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 270 counterclockwise:\n')
    print_matrix(rotate_270(matrix))
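# Worked example: a counterclockwise quarter turn maps [[1, 2], [3, 4]]
# to [[2, 4], [1, 3]] (reverse_row of the transpose).
assert rotate_90([[1, 2], [3, 4]]) == [[2, 4], [1, 3]]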
| 324
|
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.'
    )
    parser.add_argument(
        '--dataset_name', type=str, default='wikitext', help='Name of the training. Explore datasets at: hf.co/datasets.', )
    parser.add_argument(
        '--dataset_config', type=str, default='wikitext-103-raw-v1', help='Configuration name of the dataset.')
    parser.add_argument(
        '--tokenizer_name_or_path', type=str, default='sayakpaul/unigram-tokenizer-wikitext', help='Tokenizer identifier. Can be a local filepath or a Hub identifier.', )
    parser.add_argument(
        '--shard_size', type=int, default=1_0_0_0, help='Number of entries to go in a single shard.', )
    parser.add_argument('--split', type=str, default='train', choices=['train', 'test', 'validation'])
    parser.add_argument(
        '--limit', default=None, type=int, help='Limit the number of shards (used for debugging).', )
    parser.add_argument(
        '--max_length', type=int, default=5_1_2, help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
        ' sequence length that is a multiple of 8.', )
    parser.add_argument(
        '--output_dir', default='tf-tpu', type=str, help='Output directory where the TFRecord shards will be saved. If the'
        ' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
        ' shards will be directly saved to a Google Cloud Storage bucket.', )
    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples['text'])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data['input_ids'])):
        features = {
            'input_ids': tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data['input_ids'][i])),
            'attention_mask': tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data['attention_mask'][i])),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records


def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(F'''Limiting the dataset to {args.limit} entries.''')

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=['text'])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1_0_0_0, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot['input_ids'])
        filename = os.path.join(split_dir, F'''dataset-{shard_count}-{records_containing}.tfrecord''')
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print('Wrote file {} containing {} records'.format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(F'''split-{args.split}-records-count.txt''', 'w') as f:
        print(F'''Total {args.split} records: {total_records}''', file=f)


if __name__ == "__main__":
    args = parse_args()
    main(args)
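# Read-back sketch for the shards written above (a rough outline; the exact
# feature spec depends on whether the sequences were padded to a fixed length):
#   feature_spec = {
#       "input_ids": tf.io.VarLenFeature(tf.int64),
#       "attention_mask": tf.io.VarLenFeature(tf.int64),
#   }
#   ds = tf.data.TFRecordDataset(["dataset-0-1000.tfrecord"]).map(
#       lambda rec: tf.io.parse_single_example(rec, feature_spec)
#   )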
| 329
| 0
|
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFMobileBertModel,
'''fill-mask''': TFMobileBertForMaskedLM,
'''question-answering''': TFMobileBertForQuestionAnswering,
'''text-classification''': TFMobileBertForSequenceClassification,
'''token-classification''': TFMobileBertForTokenClassification,
'''zero-shot''': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, embedding_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size
        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size)

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

        def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
        def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
        def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
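        # The 3x3 reference slice was presumably computed once with the released
        # checkpoint; atol=1e-4 leaves headroom for framework-level numerical noise.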
| 225
|
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """
    Formula for amortization amount per month:
    A = p * r * (1 + r)^n / ((1 + r)^n - 1)
    where p is the principal, r is the rate of interest per month
    and n is the number of payments
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
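# For example, equated_monthly_installments(25000, 0.12, 3) evaluates to roughly
# 830.36: a 25000 principal at 12% yearly interest repaid over 36 monthly payments.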
if __name__ == "__main__":
import doctest
doctest.testmod()
| 225
| 1
|
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
| 251
|
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get hash of content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculates mean and max line length of file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculates alphanumeric character fraction."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if current hash is still in set of unique hashes and remove if true."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False
def is_autogenerated(example, scan_width=5):
    """Check if file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if file is a configuration file or a unit test by:
    1- looking for keywords in the first few lines of the file.
    2- counting the occurrences of the words 'config' and 'test' with respect to the number of lines.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if file uses symbol '=' less than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute character/token ratio of the file with tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    """Chain all preprocessing steps into one function to not fill cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
def filter(example, uniques, args):
    """Filter dataset with heuristics."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with g-zip."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
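# Example invocation (a sketch: argument names are taken from the usages above and
# their defaults live in PreprocessingArguments):
#   python preprocessing.py --dataset_name transformersbook/codeparrot \
#       --output_dir codeparrot-clean --samples_per_file 100000 --near_deduplication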
| 172
| 0
|
"""simple docstring"""
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
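    # Expected output for the example graph above: "G->C->A->B->D" for vertex D,
    # "G" for the source itself, and a ValueError for the unknown vertex "Foo".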
| 53
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(self, image_size=[32, 128], patch_size=4, num_channels=3, max_token_length=27, num_character_labels=38, num_bpe_labels=50257, num_wordpiece_labels=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4.0, qkv_bias=True, distilled=False, layer_norm_eps=1e-5, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, output_a3_attentions=False, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
| 53
| 1
|
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}), supervised_keys=None, )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}), supervised_keys=None, )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow")))
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)
    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")))
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
| 5
|
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def _a ( SCREAMING_SNAKE_CASE : str = "dhaka" , SCREAMING_SNAKE_CASE : int = 5 ):
"""simple docstring"""
UpperCamelCase__ : Optional[int] = min(SCREAMING_SNAKE_CASE , 50 ) # Prevent abuse!
UpperCamelCase__ : str = {
'''q''': query,
'''tbm''': '''isch''',
'''hl''': '''en''',
'''ijn''': '''0''',
}
UpperCamelCase__ : List[str] = requests.get('''https://www.google.com/search''' , params=SCREAMING_SNAKE_CASE , headers=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = BeautifulSoup(html.text , '''html.parser''' )
UpperCamelCase__ : Union[str, Any] = ''''''.join(
re.findall(r'''AF_initDataCallback\(([^<]+)\);''' , str(soup.select('''script''' ) ) ) )
UpperCamelCase__ : Optional[Any] = json.dumps(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = json.loads(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = re.findall(
r'''\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",''' , SCREAMING_SNAKE_CASE , )
if not matched_google_image_data:
return 0
UpperCamelCase__ : Optional[Any] = re.sub(
r'''\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]''' , '''''' , str(SCREAMING_SNAKE_CASE ) , )
UpperCamelCase__ : List[Any] = re.findall(
r'''(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]''' , SCREAMING_SNAKE_CASE , )
for index, fixed_full_res_image in enumerate(SCREAMING_SNAKE_CASE ):
if index >= max_images:
return index
UpperCamelCase__ : Optional[int] = bytes(SCREAMING_SNAKE_CASE , '''ascii''' ).decode(
'''unicode-escape''' )
UpperCamelCase__ : List[Any] = bytes(SCREAMING_SNAKE_CASE , '''ascii''' ).decode(
'''unicode-escape''' )
UpperCamelCase__ : List[Any] = urllib.request.build_opener()
UpperCamelCase__ : Optional[Any] = [
(
'''User-Agent''',
'''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''',
)
]
urllib.request.install_opener(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = F"query_{query.replace(' ' , '_' )}"
if not os.path.exists(SCREAMING_SNAKE_CASE ):
os.makedirs(SCREAMING_SNAKE_CASE )
urllib.request.urlretrieve( # noqa: S310
SCREAMING_SNAKE_CASE , F"{path_name}/original_size_img_{index}.jpg" )
return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(f"{image_count} images were downloaded to disk.")
except IndexError:
print("Please provide a search term.")
raise
| 146
| 0
|
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES)

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 350
|
def selection_sort(collection: list) -> list:
    """In-place selection sort: repeatedly select the minimum of the unsorted suffix
    and swap it into position."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
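    # Example: entering "5,2,4,1" prints [1, 2, 4, 5]. Selection sort is O(n^2) in
    # comparisons but performs at most n - 1 swaps, which matters when writes are costly.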
| 125
| 0
|
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
CORRECT_DICT = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_CORRECT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
README_CORRECT_FOUR_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
CORRECT_DICT_FOUR_LEVEL = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_EMPTY_YAML = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_EMPTY_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
README_NO_YAML = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_NO_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
README_INCORRECT_YAML = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_INCORRECT_YAML = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
README_MISSING_TEXT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_TEXT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
README_NONE_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
EXPECTED_ERROR_README_NONE_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
README_MISSING_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
README_MISSING_CONTENT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
EXPECTED_ERROR_README_MISSING_CONTENT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
README_MISSING_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
README_MULTIPLE_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
README_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
README_EMPTY = ""
EXPECTED_ERROR_README_EMPTY = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
README_MULTIPLE_SAME_HEADING_1 = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
| 104
|
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)")
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
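# Usage sketch (editorial; assumes a checkpoint such as distilbert-base-uncased is
# available and that this class is registered under the "feature-extraction" task):
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#   features = extractor("This is a test.")  # nested list: [batch, seq_len, hidden_size]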
| 6
| 0
|
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    """Returns the next number of the chain by summing the squares of the digits."""
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
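# Worked example (from the problem statement):
#   44 -> 32 -> 13 -> 10 -> 1 -> 1                                 (ends at 1)
#   85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89 -> 89 (ends at 89)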
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True  # the chain starting at 1 ends at 1
CHAINS[57] = False  # the chain starting at 58 ends at 89
def chain(number: int) -> bool:
    """Returns True if the chain of ``number`` ends at 1, False if it ends at 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Returns the count of starting numbers below ``number`` whose chains arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
| 358
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(self, vocab_size=250002, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, initializer_range=0.02, initializer_factor=0.02, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, project_dim=768, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(self, hidden_size=768, intermediate_size=3072, projection_dim=512, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=32, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')

        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
                            f'''The value `text_config_dict["{key}"]` will be used instead.''')
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
                            f'''value `text_config["{key}"]` will be overriden.''')
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)
        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
                            f'''values. The value `vision_config_dict["{key}"]` will be used instead.''')
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
                            f'''The value `vision_config["{key}"]` will be overriden.''')
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)
if text_config is None:
A : Tuple = {}
logger.info("""`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.""" )
if vision_config is None:
A : Union[str, Any] = {}
logger.info("""`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.""" )
A : Dict = AltCLIPTextConfig(**lowerCamelCase__ )
A : Optional[int] = AltCLIPVisionConfig(**lowerCamelCase__ )
A : List[str] = projection_dim
A : Any = logit_scale_init_value
A : Tuple = 1.0
@classmethod
def _lowerCAmelCase ( cls, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__ ):
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : str = copy.deepcopy(self.__dict__ )
A : Any = self.text_config.to_dict()
A : List[str] = self.vision_config.to_dict()
A : Union[str, Any] = self.__class__.model_type
return output
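A minimal usage sketch for the composition pattern above; the sub-config values are illustrative and the classes are assumed importable from transformers:

from transformers import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig

# Build the sub-configs explicitly, then compose them into a single checkpoint config.
text_config = AltCLIPTextConfig()
vision_config = AltCLIPVisionConfig(image_size=224, patch_size=32)
config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config, projection_dim=768)
assert config.to_dict()["vision_config"]["patch_size"] == 32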
| 115
| 0
|
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module) -> None:
    """Disable gradient updates for every parameter of `module`."""
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    """Pick the best available torch device, warning about the flaky MPS backend."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image) -> None:
    """Render an image without axis ticks."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
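A short sketch composing the helpers above, using the names restored in the cleanup (the Linear layer is just a stand-in):

device = get_device()
model = torch.nn.Linear(4, 4).to(device)
freeze_module(model)  # no parameter will receive gradients from now on
assert all(not p.requires_grad for p in model.parameters())
print(get_timestamp(), "model frozen on", device)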
| 39
|
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Infinite, incremental Sieve of Eratosthenes."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: reschedule its smallest prime factor.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` really is prime: its square is the first composite it flags.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """Return the least odd n whose prime-square remainder 2 * p_n * n exceeds `limit`."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
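A quick sanity check of the incremental sieve above; the expected values are the standard first ten primes:

gen = sieve()
assert [next(gen) for _ in range(10)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]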
| 8
| 0
|
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number with binary search."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Binary search per row; the bound only shrinks as we move down the grid."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
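A small worked example for the counting strategies above; rows and columns are sorted in decreasing order as the algorithms require:

small_grid = [[4, 3, -1], [2, -2, -3], [-1, -2, -4]]
assert count_negatives_binary_search([row[:] for row in small_grid]) == 6
assert count_negatives_brute_force(small_grid) == 6
assert count_negatives_brute_force_with_break(small_grid) == 6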
| 369
|
def rank_of_matrix(matrix: list[list[int | float]]) -> int:
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    row = 0
    while row < rank:
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
            row += 1
        else:
            # Find a non-zero element below to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                # Copy the last counted column into the current one
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Stay on the same row so the swapped/copied column gets processed
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
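A quick check of the elimination routine above; the middle row is a multiple of the first, so the rank drops to 2:

m = [[1.0, 2.0, 3.0], [2.0, 4.0, 6.0], [1.0, 0.0, 1.0]]
assert rank_of_matrix([row[:] for row in m]) == 2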
| 90
| 0
|
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}


def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length", type=int, default=5, help="The maximum total input sequence length after tokenization."
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name"
    )
    parser.add_argument("--device", type=str, default="cpu", help="Device where the model will be run")
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args


def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer


def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")


def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
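A hedged sketch of driving the export path above programmatically; the output file name is illustrative, and the beam/length values mirror the script defaults:

model, tokenizer = load_model_tokenizer("facebook/bart-base", device="cpu")
export_and_validate_model(model, tokenizer, "BART.onnx", num_beams=4, max_length=5)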
| 251
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["MaskFormerFeatureExtractor"]
UpperCamelCase_ = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
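What the lazy-module pattern above buys, sketched (assumes transformers is installed with torch): the heavy submodule only loads on first attribute access.

import transformers

config = transformers.MaskFormerConfig()  # triggers the lazy import of configuration_maskformer
print(type(config).__name__)  # MaskFormerConfig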
| 251
| 1
|
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
8_7_1,
4_1_9,
3_5_8,
9_4_6,
9_9_1,
2_5_2_1,
4_5_2,
3_5_8,
1_3_5_7,
3_8_7,
7_7_5_1,
3_5_3_6,
1_1_2,
9_8_5,
4_5_6,
1_2_6,
8_6_5,
9_3_8,
5_4_0_0,
5_7_3_4,
4_5_8,
1_3_6_8,
4_6_7,
7_8_6,
2_4_6_2,
5_2_4_6,
1_1_5_9,
6_3_3,
8_6_5,
4_5_1_9,
4_5_7,
5_8_2,
8_5_2,
2_5_5_7,
4_2_7,
9_1_6,
5_0_8,
4_0_5,
3_4_3_2_4,
4_9_7,
3_9_1,
4_0_8,
1_1_3_4_2,
1_2_4_4,
3_8_5,
1_0_0,
9_3_8,
9_8_5,
4_5_6,
5_7_4,
3_6_2,
1_2_5_9_7,
3_2_0_0,
3_1_2_9,
1_1_7_2,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
# fmt: off
_UpperCAmelCase = {"input_ids": [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_UpperCAmelCase,
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
            revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )
| 156
|
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")

    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string)."
        )

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
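A round-trip check for the transform above; "banana" is the usual worked example:

result = bwt_transform("banana")
assert result["bwt_string"] == "nnbaaa"
assert result["idx_original_string"] == 3
assert reverse_bwt(result["bwt_string"], result["idx_original_string"]) == "banana"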
| 156
| 1
|
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()

    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
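The single-process behaviour of split_dataset_by_node, sketched without the distributed launcher:

from datasets import Dataset
from datasets.distributed import split_dataset_by_node

ds = Dataset.from_dict({"i": list(range(12))})
shard = split_dataset_by_node(ds, rank=0, world_size=4)
assert len(shard) == 3  # 12 rows spread evenly across 4 ranks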
| 29
|
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
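A quick check of the word-value computation above: "SKY" scores 19 + 11 + 25 = 55, the tenth triangular number:

assert sum(ord(x) - 64 for x in "SKY") == 55
assert 55 in TRIANGULAR_NUMBERS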
| 29
| 1
|
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of `number`, optionally rounded to `digit_amount` digits."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.3_45, 1))
print(decimal_isolate(35.3_45, 2))
print(decimal_isolate(35.3_45, 3))
print(decimal_isolate(-14.7_89, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.1_23, 1))
print(decimal_isolate(-14.1_23, 2))
print(decimal_isolate(-14.1_23, 3))
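A few assertions pinning down the behaviour above; rounding keeps the comparisons float-safe:

assert decimal_isolate(35.345, 1) == 0.3
assert decimal_isolate(35.345, 3) == 0.345
assert decimal_isolate(-14.789, 3) == -0.789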
| 103
|
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 103
| 1
|
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
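A minimal sketch of the main call pattern, assuming the diffusers-style helper restored above; the far-future version string just avoids the removal check, and the function name is illustrative:

def resize(image, size=None, **kwargs):
    # Pops the renamed kwarg, emits a FutureWarning, and hands back the legacy value.
    legacy = deprecate("legacy_size", "99.0.0", "Use `size` instead.", take_from=kwargs)
    return size if size is not None else legacy

assert resize(None, legacy_size=224) == 224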
| 109
|
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def get_module_path(test_file):
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
| 35
| 0
|
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data
        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
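A usage sketch for the generators above; the temp path is illustrative:

import datasets

features = datasets.Features({"text": datasets.Value("string"), "score": datasets.Value("float32")})
dataset = generate_example_dataset("/tmp/bench.arrow", features, num_examples=10)
assert len(dataset) == 10 and set(dataset.column_names) == {"text", "score"}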
| 177
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
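A short sketch of the two classes above working together; the task string follows the ONNX export API, and the smaller sizes are illustrative:

config = Data2VecTextConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
onnx_config = Data2VecTextOnnxConfig(config, task="multiple-choice")
assert list(onnx_config.inputs) == ["input_ids", "attention_mask"]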
| 177
| 1
|
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 125
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class __a :
__a : int = BlenderbotConfig
__a : Any = {}
__a : str = "gelu"
def __init__( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Optional[Any]=13 , __magic_name__ : Any=7 , __magic_name__ : Optional[Any]=True , __magic_name__ : str=False , __magic_name__ : Any=99 , __magic_name__ : List[Any]=32 , __magic_name__ : Union[str, Any]=2 , __magic_name__ : List[Any]=4 , __magic_name__ : List[str]=37 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : List[str]=20 , __magic_name__ : List[str]=2 , __magic_name__ : Any=1 , __magic_name__ : Union[str, Any]=0 , ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : str = parent
UpperCAmelCase_ : Dict = batch_size
UpperCAmelCase_ : Union[str, Any] = seq_length
UpperCAmelCase_ : int = is_training
UpperCAmelCase_ : Tuple = use_labels
UpperCAmelCase_ : Tuple = vocab_size
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : Optional[Any] = intermediate_size
UpperCAmelCase_ : int = hidden_dropout_prob
UpperCAmelCase_ : Dict = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[Any] = max_position_embeddings
UpperCAmelCase_ : List[Any] = eos_token_id
UpperCAmelCase_ : Union[str, Any] = pad_token_id
UpperCAmelCase_ : Tuple = bos_token_id
def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase_ : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase_ : str = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Any = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase_ : str = prepare_blenderbot_inputs_dict(__magic_name__ , __magic_name__ , __magic_name__ )
return config, inputs_dict
def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = TFBlenderbotModel(config=__magic_name__ ).get_decoder()
UpperCAmelCase_ : Union[str, Any] = inputs_dict['''input_ids''']
UpperCAmelCase_ : Any = input_ids[:1, :]
UpperCAmelCase_ : Tuple = inputs_dict['''attention_mask'''][:1, :]
UpperCAmelCase_ : List[str] = inputs_dict['''head_mask''']
UpperCAmelCase_ : Any = 1
# first forward pass
UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ , head_mask=__magic_name__ , use_cache=__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ : int = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase_ : str = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
UpperCAmelCase_ : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCAmelCase_ : int = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCAmelCase_ : List[Any] = model(__magic_name__ , attention_mask=__magic_name__ )[0]
UpperCAmelCase_ : str = model(__magic_name__ , attention_mask=__magic_name__ , past_key_values=__magic_name__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCAmelCase_ : Union[str, Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCAmelCase_ : Any = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase_ : Optional[int] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__magic_name__ , __magic_name__ , rtol=1E-3 )
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
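# The check above follows a general pattern: run a model once over the full
# sequence, run it again incrementally with cached state, and compare a random
# slice of the outputs. A self-contained sketch of that pattern with NumPy
# (a toy cumulative-sum "model" stands in for the real decoder, an assumption):
import numpy as np

rng = np.random.default_rng(0)
full_input = rng.normal(size=(1, 8))
output_no_past = np.cumsum(full_input, axis=-1)         # one pass, no cache
carry = np.cumsum(full_input[:, :5], axis=-1)[:, -1:]   # "past" state after 5 steps
output_from_past = carry + np.cumsum(full_input[:, 5:], axis=-1)
np.testing.assert_allclose(output_no_past[:, -3:], output_from_past, rtol=1e-3)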
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")


def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
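# A short usage sketch for the structures above (the example graph and its
# weights are assumptions; any hashable node labels work):
if __name__ == "__main__":
    graph = GraphUndirectedWeighted[str]()
    graph.add_edge("a", "b", 3)
    graph.add_edge("b", "c", 10)
    graph.add_edge("a", "c", 11)
    dist, parent = prims_algo(graph)
    print(dist)    # {'a': 0, 'b': 3, 'c': 11}
    print(parent)  # {'a': None, 'b': 'a', 'c': 'a'}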
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger('''transformers.models.speecht5''')
def load_weights(checkpoint, hf_model, config):
'''simple docstring'''
hf_model.apply_weight_norm()
UpperCAmelCase_ : Optional[int] = checkpoint["""input_conv.weight_g"""]
UpperCAmelCase_ : str = checkpoint["""input_conv.weight_v"""]
UpperCAmelCase_ : str = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
UpperCAmelCase_ : Dict = checkpoint[F'''upsamples.{i}.1.weight_g''']
UpperCAmelCase_ : Any = checkpoint[F'''upsamples.{i}.1.weight_v''']
UpperCAmelCase_ : Union[str, Any] = checkpoint[F'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
UpperCAmelCase_ : Tuple = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
UpperCAmelCase_ : Dict = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
UpperCAmelCase_ : Optional[Any] = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
UpperCAmelCase_ : Tuple = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
UpperCAmelCase_ : Optional[Any] = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
UpperCAmelCase_ : Tuple = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
UpperCAmelCase_ : Optional[int] = checkpoint["""output_conv.1.weight_g"""]
UpperCAmelCase_ : Optional[Any] = checkpoint["""output_conv.1.weight_v"""]
UpperCAmelCase_ : Union[str, Any] = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechTaHifiGanConfig()

    model = SpeechTaHifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
UpperCamelCase_ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
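# Why the checkpoint stores `weight_g`/`weight_v` pairs: PyTorch weight
# normalization factors a weight tensor into a magnitude (g) and a direction
# (v). A minimal standalone sketch (plain torch; the layer sizes are
# assumptions, not the real HiFi-GAN shapes):
from torch import nn
from torch.nn.utils import remove_weight_norm, weight_norm

conv = weight_norm(nn.Conv1d(80, 512, kernel_size=7))
print(conv.weight_g.shape, conv.weight_v.shape)  # magnitude and direction
conv.weight_g.data.normal_()  # external tensors can be copied in directly,
conv.weight_v.data.normal_()  # which is what the conversion above relies on
remove_weight_norm(conv)      # folds g and v back into a single .weight
print(conv.weight.shape)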
'''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """Return the writer batch size that bounds the Parquet row-group size for the given features."""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Write the dataset to a binary file handle as Parquet; returns the number of bytes written."""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
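# A minimal, self-contained sketch of the batched-write pattern used by the
# writer above (plain pyarrow; the table contents and file name are assumptions):
import pyarrow as pa
import pyarrow.parquet as pq

table = pa.table({"id": list(range(10)), "text": [f"row {i}" for i in range(10)]})
writer = pq.ParquetWriter("example.parquet", schema=table.schema)
for offset in range(0, len(table), 4):  # write in row-group sized chunks
    writer.write_table(table.slice(offset, 4))
writer.close()
print(pq.read_table("example.parquet").num_rows)  # 10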
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
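# The same lazy-loading idea can be sketched with PEP 562 module-level
# __getattr__ (standard library only; the `heavy_module` -> `json` mapping is
# a stand-in assumption, not part of the real transformers machinery):
import importlib

_LAZY_ATTRS = {"heavy_module": "json"}  # attribute name -> module to import on first access

def __getattr__(name):
    if name in _LAZY_ATTRS:
        return importlib.import_module(_LAZY_ATTRS[name])
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")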
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start to False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
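# Example: prime_sieve(30) -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]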
def is_isogram(string: str) -> bool:
    """Return True if no letter repeats in `string` (case-insensitive)."""
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
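# Example: is_isogram("Uncopyrightable") -> True; is_isogram("letter") -> False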
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
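# The layouts the assertions above encode, shown concretely (BERT-style
# special-token placement; the token IDs themselves are model-dependent):
#   single: [CLS] A [SEP]         -> [cls_token_id] + text + [sep_token_id]
#   pair:   [CLS] A [SEP] B [SEP] -> [cls_token_id] + text + [sep_token_id] + text_2 + [sep_token_id]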
import warnings
warnings.warn(
    'memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '
'`from accelerate import find_executable_batch_size` to avoid this warning.',
FutureWarning,
)
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """Solve for whichever one of force, area or distance was passed as 0."""
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
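# Example (assumed inputs): two 4 cm^2 plates held 1 micrometre apart.
# casimir_force(force=0, area=4e-4, distance=1e-6)
# -> {'force': ~5.2e-07} newtons, per the formula above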
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        r"""Find instances where a non-binary file is opened without UTF-8 encoding."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        r"""Find print statements that are not inside comments or docstrings."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
        relevant_matches = [match for match in matches if match is not None and match.group(1) is not None]
        return relevant_matches[0] if relevant_matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
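# A quick demonstration of the first pattern on an assumed snippet:
# >>> import re
# >>> bad = "with open(path) as f:\n    pass"
# >>> pattern = r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)"
# >>> bool(re.search(pattern, bad))
# True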
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class AcceleratedScheduler:
    """Wraps a learning-rate scheduler so it only steps when the wrapped optimizer(s) actually stepped."""

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
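# A minimal usage sketch (the toy optimizer/scheduler are assumptions; outside
# an Accelerate run this only exercises the passthrough path):
import torch

params = [torch.nn.Parameter(torch.zeros(1))]
opt = torch.optim.SGD(params, lr=0.1)
sched = torch.optim.lr_scheduler.StepLR(opt, step_size=1, gamma=0.5)
wrapped = AcceleratedScheduler(sched, opt, step_with_optimizer=False)
opt.step()
wrapped.step()
print(wrapped.get_last_lr())  # [0.05]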
def remove_digit(num: int) -> int:
    """Return the largest value obtained by removing exactly one digit of `num`."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(
        int("".join(list(transposition))) for transposition in num_transpositions
    )


if __name__ == "__main__":
    __import__("doctest").testmod()
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger("transformers.models.speecht5")
def _a ( a :Optional[Any] , a :Tuple , a :Dict ) -> List[str]:
hf_model.apply_weight_norm()
a = checkpoint['''input_conv.weight_g''']
a = checkpoint['''input_conv.weight_v''']
a = checkpoint['''input_conv.bias''']
for i in range(len(config.upsample_rates ) ):
a = checkpoint[F"""upsamples.{i}.1.weight_g"""]
a = checkpoint[F"""upsamples.{i}.1.weight_v"""]
a = checkpoint[F"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
a = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_g"""]
a = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_v"""]
a = checkpoint[F"""blocks.{i}.convs1.{j}.1.bias"""]
a = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_g"""]
a = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_v"""]
a = checkpoint[F"""blocks.{i}.convs2.{j}.1.bias"""]
a = checkpoint['''output_conv.1.weight_g''']
a = checkpoint['''output_conv.1.weight_v''']
a = checkpoint['''output_conv.1.bias''']
hf_model.remove_weight_norm()
@torch.no_grad()
def _a ( a :List[str] , a :Union[str, Any] , a :Dict , a :Dict=None , a :List[Any]=None , ) -> int:
if config_path is not None:
a = SpeechTaHifiGanConfig.from_pretrained(a )
else:
a = SpeechTaHifiGanConfig()
a = SpeechTaHifiGan(a )
a = torch.load(a )
load_weights(orig_checkpoint['''model''']['''generator'''] , a , a )
a = np.load(a )
a = stats[0].reshape(-1 )
a = stats[1].reshape(-1 )
a = torch.from_numpy(a ).float()
a = torch.from_numpy(a ).float()
model.save_pretrained(a )
if repo_id:
print('''Pushing to the hub...''' )
model.push_to_hub(a )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
UpperCAmelCase__ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : torch.FloatTensor
class SCREAMING_SNAKE_CASE_ ( __a , __a ):
"""simple docstring"""
@register_to_config
def __init__( self , lowerCAmelCase__ = 3_2 , lowerCAmelCase__ = 6_4 , lowerCAmelCase__ = 2_0 , lowerCAmelCase__ = 7_6_8 , lowerCAmelCase__=7_7 , lowerCAmelCase__=4 , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = "silu" , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = "linear" , lowerCAmelCase__ = "prd" , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , ):
super().__init__()
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = attention_head_dim
__SCREAMING_SNAKE_CASE = num_attention_heads * attention_head_dim
__SCREAMING_SNAKE_CASE = additional_embeddings
__SCREAMING_SNAKE_CASE = time_embed_dim or inner_dim
__SCREAMING_SNAKE_CASE = embedding_proj_dim or embedding_dim
__SCREAMING_SNAKE_CASE = clip_embed_dim or embedding_dim
__SCREAMING_SNAKE_CASE = Timesteps(lowerCAmelCase__ , lowerCAmelCase__ , 0)
__SCREAMING_SNAKE_CASE = TimestepEmbedding(lowerCAmelCase__ , lowerCAmelCase__ , out_dim=lowerCAmelCase__ , act_fn=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__)
if embedding_proj_norm_type is None:
__SCREAMING_SNAKE_CASE = None
elif embedding_proj_norm_type == "layer":
__SCREAMING_SNAKE_CASE = nn.LayerNorm(lowerCAmelCase__)
else:
raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")
__SCREAMING_SNAKE_CASE = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__)
if encoder_hid_proj_type is None:
__SCREAMING_SNAKE_CASE = None
elif encoder_hid_proj_type == "linear":
__SCREAMING_SNAKE_CASE = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__)
else:
raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")
__SCREAMING_SNAKE_CASE = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowerCAmelCase__))
if added_emb_type == "prd":
__SCREAMING_SNAKE_CASE = nn.Parameter(torch.zeros(1 , 1 , lowerCAmelCase__))
elif added_emb_type is None:
__SCREAMING_SNAKE_CASE = None
else:
raise ValueError(
f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.")
__SCREAMING_SNAKE_CASE = nn.ModuleList(
[
BasicTransformerBlock(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , dropout=lowerCAmelCase__ , activation_fn="""gelu""" , attention_bias=lowerCAmelCase__ , )
for d in range(lowerCAmelCase__)
])
if norm_in_type == "layer":
__SCREAMING_SNAKE_CASE = nn.LayerNorm(lowerCAmelCase__)
elif norm_in_type is None:
__SCREAMING_SNAKE_CASE = None
else:
raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")
__SCREAMING_SNAKE_CASE = nn.LayerNorm(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0)
causal_attention_mask.triu_(1)
__SCREAMING_SNAKE_CASE = causal_attention_mask[None, ...]
self.register_buffer("""causal_attention_mask""" , lowerCAmelCase__ , persistent=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = nn.Parameter(torch.zeros(1 , lowerCAmelCase__))
__SCREAMING_SNAKE_CASE = nn.Parameter(torch.zeros(1 , lowerCAmelCase__))
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = {}
def fn_recursive_add_processors(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
if hasattr(lowerCAmelCase__ , """set_processor"""):
__SCREAMING_SNAKE_CASE = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}" , lowerCAmelCase__ , lowerCAmelCase__)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
return processors
def snake_case_ ( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = len(self.attn_processors.keys())
if isinstance(lowerCAmelCase__ , lowerCAmelCase__) and len(lowerCAmelCase__) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(lowerCAmelCase__)} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes.")
def fn_recursive_attn_processor(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
if hasattr(lowerCAmelCase__ , """set_processor"""):
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__):
module.set_processor(lowerCAmelCase__)
else:
module.set_processor(processor.pop(f"{name}.processor"))
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}" , lowerCAmelCase__ , lowerCAmelCase__)
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
def snake_case_ ( self):
self.set_attn_processor(AttnProcessor())
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = True , ):
__SCREAMING_SNAKE_CASE = hidden_states.shape[0]
__SCREAMING_SNAKE_CASE = timestep
if not torch.is_tensor(lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device)
elif torch.is_tensor(lowerCAmelCase__) and len(timesteps.shape) == 0:
__SCREAMING_SNAKE_CASE = timesteps[None].to(hidden_states.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__SCREAMING_SNAKE_CASE = timesteps * torch.ones(lowerCAmelCase__ , dtype=timesteps.dtype , device=timesteps.device)
__SCREAMING_SNAKE_CASE = self.time_proj(lowerCAmelCase__)
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
__SCREAMING_SNAKE_CASE = timesteps_projected.to(dtype=self.dtype)
__SCREAMING_SNAKE_CASE = self.time_embedding(lowerCAmelCase__)
if self.embedding_proj_norm is not None:
__SCREAMING_SNAKE_CASE = self.embedding_proj_norm(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.embedding_proj(lowerCAmelCase__)
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
__SCREAMING_SNAKE_CASE = self.encoder_hidden_states_proj(lowerCAmelCase__)
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""")
__SCREAMING_SNAKE_CASE = self.proj_in(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.positional_embedding.to(hidden_states.dtype)
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = 0
if encoder_hidden_states is not None:
additional_embeds.append(lowerCAmelCase__)
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape) == 2:
__SCREAMING_SNAKE_CASE = proj_embeddings[:, None, :]
if len(hidden_states.shape) == 2:
__SCREAMING_SNAKE_CASE = hidden_states[:, None, :]
__SCREAMING_SNAKE_CASE = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
__SCREAMING_SNAKE_CASE = self.prd_embedding.to(hidden_states.dtype).expand(lowerCAmelCase__ , -1 , -1)
additional_embeds.append(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = torch.cat(
lowerCAmelCase__ , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
__SCREAMING_SNAKE_CASE = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
__SCREAMING_SNAKE_CASE = F.pad(
lowerCAmelCase__ , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
__SCREAMING_SNAKE_CASE = hidden_states + positional_embeddings
if attention_mask is not None:
__SCREAMING_SNAKE_CASE = (1 - attention_mask.to(hidden_states.dtype)) * -1_00_00.0
__SCREAMING_SNAKE_CASE = F.pad(lowerCAmelCase__ , (0, self.additional_embeddings) , value=0.0)
__SCREAMING_SNAKE_CASE = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
__SCREAMING_SNAKE_CASE = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0)
if self.norm_in is not None:
__SCREAMING_SNAKE_CASE = self.norm_in(lowerCAmelCase__)
for block in self.transformer_blocks:
__SCREAMING_SNAKE_CASE = block(lowerCAmelCase__ , attention_mask=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.norm_out(lowerCAmelCase__)
if self.prd_embedding is not None:
__SCREAMING_SNAKE_CASE = hidden_states[:, -1]
else:
__SCREAMING_SNAKE_CASE = hidden_states[:, additional_embeddings_len:]
__SCREAMING_SNAKE_CASE = self.proj_to_clip_embeddings(lowerCAmelCase__)
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=lowerCAmelCase__)
def snake_case_ ( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
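# A standalone sketch of the causal attention mask built in __init__ above
# (torch only; the 4-token size is an assumption for display):
import torch

seq_len = 4
mask = torch.full([seq_len, seq_len], -10000.0)
mask.triu_(1)  # future positions keep -10000.0; diagonal and past become 0
print(mask)    # row i can attend to columns <= i only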
def hamming_distance(string1: str, string2: str) -> int:
    """Count the positions at which two equal-length strings differ."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
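# Example: hamming_distance("karolin", "kathrin") -> 3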
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __a ( snake_case__, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ConsistencyModelPipeline
SCREAMING_SNAKE_CASE_ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
SCREAMING_SNAKE_CASE_ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
SCREAMING_SNAKE_CASE_ = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'output_type',
'return_dict',
'callback',
'callback_steps',
] )
@property
def _lowerCAmelCase ( self : str ):
UpperCamelCase__ : Optional[int] =UNetaDModel.from_pretrained(
'''diffusers/consistency-models-test''' , subfolder='''test_unet''' , )
return unet
@property
def _lowerCAmelCase ( self : Union[str, Any] ):
UpperCamelCase__ : List[Any] =UNetaDModel.from_pretrained(
'''diffusers/consistency-models-test''' , subfolder='''test_unet_class_cond''' , )
return unet
def _lowerCAmelCase ( self : Any , lowercase_ : Optional[int]=False ):
if class_cond:
UpperCamelCase__ : int =self.dummy_cond_unet
else:
UpperCamelCase__ : Union[str, Any] =self.dummy_uncond_unet
# Default to CM multistep sampler
UpperCamelCase__ : List[str] =CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
UpperCamelCase__ : Any ={
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def _lowerCAmelCase ( self : List[Any] , lowercase_ : int , lowercase_ : List[str]=0 ):
if str(lowercase_ ).startswith('''mps''' ):
UpperCamelCase__ : Tuple =torch.manual_seed(lowercase_ )
else:
UpperCamelCase__ : Union[str, Any] =torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
UpperCamelCase__ : Tuple ={
'''batch_size''': 1,
'''num_inference_steps''': None,
'''timesteps''': [22, 0],
'''generator''': generator,
'''output_type''': '''np''',
}
return inputs
def _lowerCAmelCase ( self : int ):
UpperCamelCase__ : List[str] ='''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : Optional[Any] =self.get_dummy_components()
UpperCamelCase__ : Union[str, Any] =ConsistencyModelPipeline(**lowercase_ )
UpperCamelCase__ : Optional[Any] =pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCamelCase__ : Optional[int] =self.get_dummy_inputs(lowercase_ )
UpperCamelCase__ : Any =pipe(**lowercase_ ).images
assert image.shape == (1, 32, 32, 3)
UpperCamelCase__ : str =image[0, -3:, -3:, -1]
UpperCamelCase__ : Tuple =np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowerCAmelCase ( self : int ):
UpperCamelCase__ : Dict ='''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : Union[str, Any] =self.get_dummy_components(class_cond=lowercase_ )
UpperCamelCase__ : Dict =ConsistencyModelPipeline(**lowercase_ )
UpperCamelCase__ : Union[str, Any] =pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCamelCase__ : Any =self.get_dummy_inputs(lowercase_ )
UpperCamelCase__ : int =0
UpperCamelCase__ : Optional[int] =pipe(**lowercase_ ).images
assert image.shape == (1, 32, 32, 3)
UpperCamelCase__ : Union[str, Any] =image[0, -3:, -3:, -1]
UpperCamelCase__ : Any =np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowerCAmelCase ( self : Dict ):
UpperCamelCase__ : Any ='''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : Any =self.get_dummy_components()
UpperCamelCase__ : str =ConsistencyModelPipeline(**lowercase_ )
UpperCamelCase__ : Tuple =pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCamelCase__ : Union[str, Any] =self.get_dummy_inputs(lowercase_ )
UpperCamelCase__ : Dict =1
UpperCamelCase__ : List[str] =None
UpperCamelCase__ : List[str] =pipe(**lowercase_ ).images
assert image.shape == (1, 32, 32, 3)
UpperCamelCase__ : Optional[int] =image[0, -3:, -3:, -1]
UpperCamelCase__ : List[Any] =np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowerCAmelCase ( self : Dict ):
UpperCamelCase__ : List[Any] ='''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : Union[str, Any] =self.get_dummy_components(class_cond=lowercase_ )
UpperCamelCase__ : List[str] =ConsistencyModelPipeline(**lowercase_ )
UpperCamelCase__ : Optional[int] =pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCamelCase__ : Optional[Any] =self.get_dummy_inputs(lowercase_ )
UpperCamelCase__ : Tuple =1
UpperCamelCase__ : str =None
UpperCamelCase__ : Optional[int] =0
UpperCamelCase__ : Union[str, Any] =pipe(**lowercase_ ).images
assert image.shape == (1, 32, 32, 3)
UpperCamelCase__ : List[str] =image[0, -3:, -3:, -1]
UpperCamelCase__ : int =np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class __a ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : List[str] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self : str , lowercase_ : List[Any]=0 , lowercase_ : Optional[int]=False , lowercase_ : Optional[Any]="cpu" , lowercase_ : Optional[Any]=torch.floataa , lowercase_ : Union[str, Any]=(1, 3, 64, 64) ):
UpperCamelCase__ : Tuple =torch.manual_seed(lowercase_ )
UpperCamelCase__ : List[Any] ={
'''num_inference_steps''': None,
'''timesteps''': [22, 0],
'''class_labels''': 0,
'''generator''': generator,
'''output_type''': '''np''',
}
if get_fixed_latents:
UpperCamelCase__ : List[Any] =self.get_fixed_latents(seed=lowercase_ , device=lowercase_ , dtype=lowercase_ , shape=lowercase_ )
UpperCamelCase__ : Any =latents
return inputs
def _lowerCAmelCase ( self : Optional[Any] , lowercase_ : List[str]=0 , lowercase_ : Dict="cpu" , lowercase_ : int=torch.floataa , lowercase_ : str=(1, 3, 64, 64) ):
if type(lowercase_ ) == str:
UpperCamelCase__ : List[Any] =torch.device(lowercase_ )
UpperCamelCase__ : Union[str, Any] =torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
UpperCamelCase__ : Any =randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ )
return latents
def _lowerCAmelCase ( self : Union[str, Any] ):
UpperCamelCase__ : Tuple =UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
UpperCamelCase__ : List[Any] =CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
UpperCamelCase__ : Optional[int] =ConsistencyModelPipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(torch_device=lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCamelCase__ : str =self.get_inputs()
UpperCamelCase__ : str =pipe(**lowercase_ ).images
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : Tuple =image[0, -3:, -3:, -1]
UpperCamelCase__ : Any =np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _lowerCAmelCase ( self : Optional[int] ):
UpperCamelCase__ : Tuple =UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
UpperCamelCase__ : Dict =CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
UpperCamelCase__ : Optional[int] =ConsistencyModelPipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(torch_device=lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCamelCase__ : Optional[Any] =self.get_inputs()
UpperCamelCase__ : Optional[Any] =1
UpperCamelCase__ : int =None
UpperCamelCase__ : int =pipe(**lowercase_ ).images
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : str =image[0, -3:, -3:, -1]
UpperCamelCase__ : List[Any] =np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
@require_torch_a
def _lowerCAmelCase ( self : Tuple ):
UpperCamelCase__ : str =UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
UpperCamelCase__ : int =CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
UpperCamelCase__ : List[str] =ConsistencyModelPipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(torch_device=lowercase_ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCamelCase__ : Union[str, Any] =self.get_inputs(get_fixed_latents=lowercase_ , device=lowercase_ )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=lowercase_ , enable_math=lowercase_ , enable_mem_efficient=lowercase_ ):
UpperCamelCase__ : Optional[Any] =pipe(**lowercase_ ).images
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : int =image[0, -3:, -3:, -1]
UpperCamelCase__ : int =np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@require_torch_a
def _lowerCAmelCase ( self : List[str] ):
UpperCamelCase__ : Any =UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
UpperCamelCase__ : List[str] =CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
UpperCamelCase__ : Optional[Any] =ConsistencyModelPipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(torch_device=lowercase_ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCamelCase__ : Union[str, Any] =self.get_inputs(get_fixed_latents=lowercase_ , device=lowercase_ )
UpperCamelCase__ : List[Any] =1
UpperCamelCase__ : int =None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=lowercase_ , enable_math=lowercase_ , enable_mem_efficient=lowercase_ ):
UpperCamelCase__ : Tuple =pipe(**lowercase_ ).images
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : Dict =image[0, -3:, -3:, -1]
UpperCamelCase__ : Optional[Any] =np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
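# A standalone sketch of the fixed-latents trick used above: seeding a
# torch.Generator makes the sampled latents, and hence the pipeline output,
# bitwise reproducible (the shape here is an arbitrary assumption):
import torch

gen = torch.Generator(device="cpu").manual_seed(0)
latents_a = torch.randn((1, 3, 8, 8), generator=gen)
gen = torch.Generator(device="cpu").manual_seed(0)
latents_b = torch.randn((1, 3, 8, 8), generator=gen)
assert torch.equal(latents_a, latents_b)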
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
_SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
# General docstring
_SCREAMING_SNAKE_CASE : Union[str, Any] = """ResNetConfig"""
# Base docstring
_SCREAMING_SNAKE_CASE : str = """microsoft/resnet-50"""
_SCREAMING_SNAKE_CASE : List[Any] = [1, 2_0_4_8, 7, 7]
# Image classification docstring
_SCREAMING_SNAKE_CASE : Tuple = """microsoft/resnet-50"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = """tiger cat"""
_SCREAMING_SNAKE_CASE : Optional[Any] = [
"""microsoft/resnet-50""",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class __a ( nn.Module ):
"""simple docstring"""
def __init__( self : str , lowercase_ : int , lowercase_ : int , lowercase_ : int = 3 , lowercase_ : int = 1 , lowercase_ : str = "relu" ):
super().__init__()
UpperCamelCase__ : Optional[Any] =nn.Convad(
lowercase_ , lowercase_ , kernel_size=lowercase_ , stride=lowercase_ , padding=kernel_size // 2 , bias=lowercase_ )
UpperCamelCase__ : Tuple =nn.BatchNormad(lowercase_ )
UpperCamelCase__ : int =ACTaFN[activation] if activation is not None else nn.Identity()
def _lowerCAmelCase ( self : Dict , lowercase_ : Tensor ):
UpperCamelCase__ : List[Any] =self.convolution(lowercase_ )
UpperCamelCase__ : Union[str, Any] =self.normalization(lowercase_ )
UpperCamelCase__ : Optional[int] =self.activation(lowercase_ )
return hidden_state
class __a ( nn.Module ):
"""simple docstring"""
def __init__( self : Tuple , lowercase_ : ResNetConfig ):
super().__init__()
UpperCamelCase__ : Any =ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
UpperCamelCase__ : Tuple =nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
UpperCamelCase__ : Any =config.num_channels
def _lowerCAmelCase ( self : str , lowercase_ : Tensor ):
UpperCamelCase__ : Optional[Any] =pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
UpperCamelCase__ : Dict =self.embedder(lowercase_ )
UpperCamelCase__ : Union[str, Any] =self.pooler(lowercase_ )
return embedding
class __a ( nn.Module ):
"""simple docstring"""
def __init__( self : Tuple , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 ):
super().__init__()
UpperCamelCase__ : int =nn.Convad(lowercase_ , lowercase_ , kernel_size=1 , stride=lowercase_ , bias=lowercase_ )
UpperCamelCase__ : Optional[int] =nn.BatchNormad(lowercase_ )
def _lowerCAmelCase ( self : Tuple , lowercase_ : Tensor ):
UpperCamelCase__ : Dict =self.convolution(lowercase_ )
UpperCamelCase__ : Dict =self.normalization(lowercase_ )
return hidden_state
class __a ( nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 , lowercase_ : str = "relu" ):
super().__init__()
UpperCamelCase__ : Optional[Any] =in_channels != out_channels or stride != 1
UpperCamelCase__ : str =(
ResNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
UpperCamelCase__ : List[str] =nn.Sequential(
ResNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ ) , ResNetConvLayer(lowercase_ , lowercase_ , activation=lowercase_ ) , )
UpperCamelCase__ : Any =ACTaFN[activation]
def _lowerCAmelCase ( self : str , lowercase_ : Tuple ):
UpperCamelCase__ : Any =hidden_state
UpperCamelCase__ : Union[str, Any] =self.layer(lowercase_ )
UpperCamelCase__ : str =self.shortcut(lowercase_ )
hidden_state += residual
UpperCamelCase__ : str =self.activation(lowercase_ )
return hidden_state
class __a ( nn.Module ):
"""simple docstring"""
def __init__( self : str , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 , lowercase_ : str = "relu" , lowercase_ : int = 4 ):
super().__init__()
UpperCamelCase__ : Optional[Any] =in_channels != out_channels or stride != 1
UpperCamelCase__ : Union[str, Any] =out_channels // reduction
UpperCamelCase__ : str =(
ResNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
UpperCamelCase__ : int =nn.Sequential(
ResNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 ) , ResNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ ) , ResNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
UpperCamelCase__ : List[Any] =ACTaFN[activation]
def _lowerCAmelCase ( self : Tuple , lowercase_ : Optional[int] ):
UpperCamelCase__ : Dict =hidden_state
UpperCamelCase__ : str =self.layer(lowercase_ )
UpperCamelCase__ : Tuple =self.shortcut(lowercase_ )
hidden_state += residual
UpperCamelCase__ : Optional[int] =self.activation(lowercase_ )
return hidden_state
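# Both layer classes above implement the classic residual pattern
# out = activation(F(x) + shortcut(x)). A self-contained sketch (plain torch;
# the toy channel count is an assumption):
import torch
from torch import nn

class TinyResidualBlock(nn.Module):
    def __init__(self, channels: int = 8):
        super().__init__()
        self.layer = nn.Sequential(
            nn.Conv2d(channels, channels, 3, padding=1),
            nn.BatchNorm2d(channels),
            nn.ReLU(),
            nn.Conv2d(channels, channels, 3, padding=1),
            nn.BatchNorm2d(channels),
        )
        self.activation = nn.ReLU()

    def forward(self, x):
        return self.activation(self.layer(x) + x)  # identity shortcut

print(TinyResidualBlock()(torch.randn(1, 8, 16, 16)).shape)  # (1, 8, 16, 16)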
class ResNetStage(nn.Module):
    """A ResNet stage composed of stacked layers."""

    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class ResNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
_SCREAMING_SNAKE_CASE : int = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
_SCREAMING_SNAKE_CASE : Optional[int] = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
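
# Added editorial sketch, not part of the original file: a minimal smoke test for the
# classes above. It assumes `ResNetConfig`, `ResNetConvLayer` and `torch` are imported
# at the top of this module, as in `transformers.models.resnet.modeling_resnet`;
# shapes follow the default ResNet-50 configuration.
if __name__ == "__main__":
    config = ResNetConfig()
    model = ResNetModel(config)
    pixel_values = torch.randn(1, config.num_channels, 224, 224)
    outputs = model(pixel_values)
    # the adaptive pooler collapses the spatial dimensions to 1x1
    print(outputs.pooler_output.shape)  # e.g. torch.Size([1, 2048, 1, 1])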
| 157
| 1
|
def count_divisors(n):
    """Count the divisors of n from its prime factorization."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution():
    """Return the first triangular number with more than 500 divisors."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
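
# Added, hedged sanity check: 28 = 1 + 2 + ... + 7 is the first triangular number with
# more than five divisors (1, 2, 4, 7, 14, 28), per the Project Euler 12 statement.
if __name__ == "__main__":
    assert count_divisors(28) == 6
    assert count_divisors(76576500) > 500  # the widely reported answer for this problem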
| 9
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
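
# Added, hedged usage example: outside the test harness, the processor resizes and
# normalizes a PIL image to the 18x18 target configured by the tester above.
if __name__ == "__main__":
    processor = ViTImageProcessor(size={"height": 18, "width": 18})
    dummy = Image.fromarray(np.uint8(np.random.rand(30, 40, 3) * 255))
    print(processor(dummy, return_tensors="pt").pixel_values.shape)  # torch.Size([1, 3, 18, 18])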
| 266
| 0
|
'''simple docstring'''
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive variant: gcd(a, b) = gcd(b, a mod b)."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
def main() -> None:
print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
main()
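
# Added, hedged property check: both implementations should agree on every input pair.
if __name__ == "__main__":
    for a in range(1, 25):
        for b in range(1, 25):
            assert euclidean_gcd(a, b) == euclidean_gcd_recursive(a, b)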
| 357
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)
    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
'''simple docstring'''
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-2
    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]

                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]

                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
| 270
| 0
|
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 62
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase__ : Dict = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : List[str] = ["GLPNFeatureExtractor"]
lowerCAmelCase__ : Optional[int] = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
"GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
"GLPNForDepthEstimation",
"GLPNLayer",
"GLPNModel",
"GLPNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 352
|
'''simple docstring'''
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of `function` in the closed interval [a, b] by bisection."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until the interval is smaller than 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 10_00))
import doctest
doctest.testmod()
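
# Added, hedged extra check: on f(x) = x**3 - 2x - 5 over [1, 1000], bisection should
# converge to the real root near 2.0945515, so f evaluated there is close to zero.
if __name__ == "__main__":
    root = bisection(f, 1, 1000)
    assert abs(f(root)) < 1e-5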
| 37
| 0
|
'''simple docstring'''
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slide a `size` x `size` window over a square matrix, keeping the maximum."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slide a `size` x `size` window over a square matrix, keeping the (truncated) average."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
    image = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
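
# Added, hedged self-contained example: 2x2 pooling with stride 2 on a 4x4 matrix
# reproduces the classic pooling results (avgpooling truncates via int()).
if __name__ == "__main__":
    mat = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
    print(maxpooling(mat, size=2, stride=2))  # [[ 6.  8.] [14. 16.]]
    print(avgpooling(mat, size=2, stride=2))  # [[ 3.  5.] [11. 13.]]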
| 272
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 272
| 1
|
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument('--num_cores', type=int, default=1, help='Number of TPU cores to use (1 or 8).')

    # positional
    parser.add_argument(
        'training_script',
        type=str,
        help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ),
    )

    # rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER)

    return parser.parse_args()
def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
| 352
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use CLIPImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
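
# Added, hedged usage note: instantiating the deprecated class simply forwards to
# CLIPImageProcessor while emitting a FutureWarning.
if __name__ == "__main__":
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        CLIPFeatureExtractor()
        assert any(issubclass(w.category, FutureWarning) for w in caught)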
| 95
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 48
|
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """Map an activation-function name to the corresponding torch module."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
| 138
| 0
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    """
    Configuration class for a generic encoder-decoder model, composing an encoder config and a decoder config.
    """

    model_type = 'encoder-decoder'
    is_composition = True

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('encoder')
        encoder_model_type = encoder_config.pop('model_type')
        decoder_config = kwargs.pop('decoder')
        decoder_model_type = decoder_config.pop('model_type')

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config')
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['encoder'] = self.encoder.to_dict()
        output['decoder'] = self.decoder.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
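
# Added, hedged usage sketch: composing two small BERT configs. Assumes a full
# `transformers` install so AutoConfig can resolve the "bert" model type.
if __name__ == "__main__":
    from transformers import BertConfig

    config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
    assert config.is_composition and config.decoder.is_decoder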
| 14
|
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
        transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert('RGB')) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)
    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f'''The value of strength should in [0.0, 1.0] but is {strength}''')
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}''')

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(generator)}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''')

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print('add noise to latents at timestep', timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
@torch.no_grad()
    def __call__(
        self,
        image=None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator=None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output=None,
        output_type: str = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
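
# Added, hedged usage sketch: the checkpoint and file names below are illustrative,
# not from the original file; any DDPM/DDIM-style unet+scheduler checkpoint should work.
if __name__ == "__main__":
    pipe = DDIMNoiseComparativeAnalysisPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
    init_image = PIL.Image.open("input.jpg")  # hypothetical local file
    image, latent_timestep = pipe(init_image, strength=0.5, num_inference_steps=50, return_dict=False)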
| 14
| 1
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = '''https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'''
    image = Image.open(requests.get(url, stream=True).raw).convert('''RGB''')

    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.embeddings.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.embeddings.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias')
        v_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias')

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f'visual_encoder.blocks.{i}.attn.qkv.bias'] = qkv_bias
def get_blip2_config(model_name):
    image_size = 364 if '''coco''' in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained('''google/flan-t5-xl''', dense_act_fn='''gelu''', bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained('''google/flan-t5-xxl''', dense_act_fn='''gelu''', bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained('''decapoda-research/llama-7b-hf''', vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained('''decapoda-research/llama-13b-hf''', vocab_size=32001).to_dict()
    else:
        raise ValueError('''Model name not supported''')

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original model's weights to the Transformers design."""
    qformer_tokenizer = AutoTokenizer.from_pretrained('''bert-base-uncased''', truncation_side='''left''')
    qformer_tokenizer.add_special_tokens({'''bos_token''': '''[DEC]'''})

    if "t5" in model_name:
        tokenizer = T5TokenizerFast.from_pretrained('''google/flan-t5-xl''', truncation_side='''left''')
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            '''huggyllama/llama-7b''', truncation_side='''left''', bos_token='''</s>''', unk_token='''</s>''')
        tokenizer.add_special_tokens({'''pad_token''': '''[PAD]'''})

    config, image_size = get_blip2_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        '''instructblip-vicuna-7b''': ('''blip2_vicuna_instruct''', '''vicuna7b'''),
        '''instructblip-vicuna-13b''': ('''blip2_vicuna_instruct''', '''vicuna13b'''),
        '''instructblip-flan-t5-xl''': ('''blip2_t5_instruct''', '''flant5xl'''),
        '''instructblip-flan-t5-xxl''': ('''blip2_t5_instruct''', '''flant5xxl'''),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print('''Loading original model...''')
    hf_model_device = '''cuda:1''' if torch.cuda.is_available() else '''cpu'''
    lavis_device = '''cuda:2''' if torch.cuda.is_available() else '''cpu'''
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=lavis_device)
    original_model.eval()
    print('''Done!''')
# update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith('''Qformer.bert'''):
            key = key.replace('''Qformer.bert''', '''qformer''')
        if "attention.self" in key:
            key = key.replace('''self''', '''attention''')
        if "llm_proj" in key:
            key = key.replace('''llm_proj''', '''language_projection''')
        if "t5_proj" in key:
            key = key.replace('''t5_proj''', '''language_projection''')
        if key.startswith('''llm_model'''):
            key = key.replace('''llm_model''', '''language_model''')
        if key.startswith('''t5'''):
            key = key.replace('''t5''', '''language''')
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)
    image = load_demo_image()
    prompt = '''What is unusual about this image?'''

    # create processor
    image_processor = BlipImageProcessor(
        size={'''height''': image_size, '''width''': image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD)
    processor = InstructBlipProcessor(
        image_processor=image_processor, tokenizer=tokenizer, qformer_tokenizer=qformer_tokenizer, )
    inputs = processor(images=image, text=prompt, return_tensors='''pt''').to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors['''eval'''](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)
original_model.to(_UpperCAmelCase )
hf_model.to(_UpperCAmelCase )
with torch.no_grad():
if "vicuna" in model_name:
__UpperCAmelCase : Optional[int] = original_model({'''image''': original_pixel_values, '''text_input''': [prompt]} ).logits
__UpperCAmelCase : str = hf_model(**_UpperCAmelCase ).logits
else:
__UpperCAmelCase : List[str] = original_model(
{'''image''': original_pixel_values, '''text_input''': [prompt], '''text_output''': ['''\n''']} ).logits
__UpperCAmelCase : int = tokenizer('''\n''' , return_tensors='''pt''' ).input_ids.to(_UpperCAmelCase )
__UpperCAmelCase : Dict = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -1_00 )
__UpperCAmelCase : List[str] = hf_model(**_UpperCAmelCase , labels=_UpperCAmelCase ).logits
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
__UpperCAmelCase : int = 1e-4 if '''vicuna''' in model_name else 1e-5
assert torch.allclose(original_logits.to(logits.device ) , _UpperCAmelCase , atol=_UpperCAmelCase )
print('''Looks ok!''' )
print('''Generating with original model...''' )
__UpperCAmelCase : str = original_model.generate({'''image''': original_pixel_values, '''prompt''': prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print('''Generating with HF model...''' )
__UpperCAmelCase : str = hf_model.generate(
**_UpperCAmelCase , do_sample=_UpperCAmelCase , num_beams=5 , max_length=2_56 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
__UpperCAmelCase : List[Any] = 2
print('''Original generation:''' , _UpperCAmelCase )
__UpperCAmelCase : Dict = processor.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
__UpperCAmelCase : Optional[Any] = [text.strip() for text in output_text]
print('''HF generation:''' , _UpperCAmelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_UpperCAmelCase )
hf_model.save_pretrained(_UpperCAmelCase )
if push_to_hub:
processor.push_to_hub(f'Salesforce/{model_name}' )
hf_model.push_to_hub(f'Salesforce/{model_name}' )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
__A =[
"instructblip-vicuna-7b",
"instructblip-vicuna-13b",
"instructblip-flan-t5-xl",
"instructblip-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="instructblip-flan-t5-xl",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
__A =parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
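# Minimal usage sketch (not part of the conversion script): once saved with
# `save_pretrained`, the converted checkpoint can be reloaded through the standard
# `from_pretrained` API. The local path below is a placeholder; the image URL is the
# COCO sample used elsewhere in these scripts.


def _example_reload_converted_instructblip():
    import requests
    from PIL import Image
    from transformers import InstructBlipForConditionalGeneration, InstructBlipProcessor

    processor = InstructBlipProcessor.from_pretrained("path/to/pytorch_dump_folder")
    model = InstructBlipForConditionalGeneration.from_pretrained("path/to/pytorch_dump_folder")
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
    generated_ids = model.generate(**inputs, max_new_tokens=50)
    print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip())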
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__A =logging.get_logger(__name__)
def a ( _UpperCAmelCase : int ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = SwinConfig(
embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['''stage2''', '''stage3''', '''stage4'''] , )
__UpperCAmelCase : Optional[int] = DetaConfig(
backbone_config=_UpperCAmelCase , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=_UpperCAmelCase , with_box_refine=_UpperCAmelCase , two_stage=_UpperCAmelCase , )
# set labels
__UpperCAmelCase : Optional[int] = '''huggingface/label-files'''
if "o365" in model_name:
__UpperCAmelCase : Tuple = 3_66
__UpperCAmelCase : List[str] = '''object365-id2label.json'''
else:
__UpperCAmelCase : Any = 91
__UpperCAmelCase : int = '''coco-detection-id2label.json'''
__UpperCAmelCase : Optional[int] = num_labels
__UpperCAmelCase : List[str] = json.load(open(cached_download(hf_hub_url(_UpperCAmelCase , _UpperCAmelCase , repo_type='''dataset''' ) ) , '''r''' ) )
__UpperCAmelCase : str = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
__UpperCAmelCase : Optional[int] = idalabel
__UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
return config
def a ( _UpperCAmelCase : Dict ):
'''simple docstring'''
__UpperCAmelCase : List[str] = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def a ( _UpperCAmelCase : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = dct.pop(_UpperCAmelCase )
__UpperCAmelCase : List[Any] = val
def a ( _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : List[str] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__UpperCAmelCase : str = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__UpperCAmelCase : List[str] = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' )
__UpperCAmelCase : List[Any] = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__UpperCAmelCase : Dict = in_proj_weight[:dim, :]
__UpperCAmelCase : List[str] = in_proj_bias[: dim]
__UpperCAmelCase : str = in_proj_weight[
dim : dim * 2, :
]
__UpperCAmelCase : Any = in_proj_bias[
dim : dim * 2
]
__UpperCAmelCase : Tuple = in_proj_weight[
-dim :, :
]
__UpperCAmelCase : int = in_proj_bias[-dim :]
# fmt: on
def a ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any ):
'''simple docstring'''
__UpperCAmelCase : int = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
__UpperCAmelCase : List[str] = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
__UpperCAmelCase : Tuple = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
__UpperCAmelCase : Union[str, Any] = in_proj_weight[:hidden_size, :]
__UpperCAmelCase : List[Any] = in_proj_bias[:hidden_size]
__UpperCAmelCase : int = in_proj_weight[
hidden_size : hidden_size * 2, :
]
__UpperCAmelCase : str = in_proj_bias[hidden_size : hidden_size * 2]
__UpperCAmelCase : Tuple = in_proj_weight[-hidden_size:, :]
__UpperCAmelCase : Optional[Any] = in_proj_bias[-hidden_size:]
def a ( ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__UpperCAmelCase : int = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw )
return im
@torch.no_grad()
def a ( _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] ):
'''simple docstring'''
__UpperCAmelCase : Tuple = get_deta_config(_UpperCAmelCase )
# load original state dict
if model_name == "deta-swin-large":
__UpperCAmelCase : Dict = hf_hub_download(repo_id='''nielsr/deta-checkpoints''' , filename='''adet_swin_ft.pth''' )
elif model_name == "deta-swin-large-o365":
__UpperCAmelCase : Any = hf_hub_download(repo_id='''jozhang97/deta-swin-l-o365''' , filename='''deta_swin_pt_o365.pth''' )
else:
raise ValueError(f'Model name {model_name} not supported' )
__UpperCAmelCase : str = torch.load(_UpperCAmelCase , map_location='''cpu''' )['''model''']
# original state dict
for name, param in state_dict.items():
print(_UpperCAmelCase , param.shape )
# rename keys
__UpperCAmelCase : int = create_rename_keys(_UpperCAmelCase )
for src, dest in rename_keys:
rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
read_in_swin_q_k_v(_UpperCAmelCase , config.backbone_config )
read_in_decoder_q_k_v(_UpperCAmelCase , _UpperCAmelCase )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
__UpperCAmelCase : Optional[Any] = state_dict.pop(_UpperCAmelCase )
__UpperCAmelCase : Union[str, Any] = val
if "input_proj" in key:
__UpperCAmelCase : Union[str, Any] = state_dict.pop(_UpperCAmelCase )
__UpperCAmelCase : List[str] = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
__UpperCAmelCase : Union[str, Any] = state_dict.pop(_UpperCAmelCase )
__UpperCAmelCase : Union[str, Any] = val
# finally, create HuggingFace model and load state dict
__UpperCAmelCase : Union[str, Any] = DetaForObjectDetection(_UpperCAmelCase )
model.load_state_dict(_UpperCAmelCase )
model.eval()
__UpperCAmelCase : Optional[int] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
model.to(_UpperCAmelCase )
# load image processor
__UpperCAmelCase : str = DetaImageProcessor(format='''coco_detection''' )
# verify our conversion on image
__UpperCAmelCase : str = prepare_img()
__UpperCAmelCase : Optional[int] = processor(images=_UpperCAmelCase , return_tensors='''pt''' )
__UpperCAmelCase : List[Any] = encoding['''pixel_values''']
__UpperCAmelCase : List[str] = model(pixel_values.to(_UpperCAmelCase ) )
# verify logits
print('''Logits:''' , outputs.logits[0, :3, :3] )
print('''Boxes:''' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
__UpperCAmelCase : str = torch.tensor(
[[-7.63_08, -2.84_85, -5.37_37], [-7.20_37, -4.55_05, -4.80_27], [-7.29_43, -4.26_11, -4.66_17]] )
__UpperCAmelCase : Union[str, Any] = torch.tensor([[0.49_87, 0.49_69, 0.99_99], [0.25_49, 0.54_98, 0.48_05], [0.54_98, 0.27_57, 0.05_69]] )
elif model_name == "deta-swin-large-o365":
__UpperCAmelCase : Optional[Any] = torch.tensor(
[[-8.01_22, -3.57_20, -4.97_17], [-8.15_47, -3.68_86, -4.63_89], [-7.66_10, -3.61_94, -5.01_34]] )
__UpperCAmelCase : str = torch.tensor([[0.25_23, 0.55_49, 0.48_81], [0.77_15, 0.41_49, 0.46_01], [0.55_03, 0.27_53, 0.05_75]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(_UpperCAmelCase ) , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(_UpperCAmelCase ) , atol=1e-4 )
print('''Everything ok!''' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
processor.save_pretrained(_UpperCAmelCase )
# Push to hub
if push_to_hub:
print('''Pushing model and processor to hub...''' )
model.push_to_hub(f'jozhang97/{model_name}' )
processor.push_to_hub(f'jozhang97/{model_name}' )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
parser.add_argument(
"--model_name",
type=str,
default="deta-swin-large",
choices=["deta-swin-large", "deta-swin-large-o365"],
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__A =parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
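# Minimal usage sketch (illustrative; the local path is a placeholder): the converted
# DETA checkpoint can be reloaded and its raw outputs turned into labelled boxes with
# the image processor's post-processing helper.


def _example_run_converted_deta():
    import requests
    import torch
    from PIL import Image
    from transformers import DetaForObjectDetection, DetaImageProcessor

    processor = DetaImageProcessor.from_pretrained("path/to/pytorch_dump_folder")
    model = DetaForObjectDetection.from_pretrained("path/to/pytorch_dump_folder")
    image = Image.open(
        requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
    )
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # target_sizes expects (height, width); PIL's .size is (width, height)
    results = processor.post_process_object_detection(
        outputs, threshold=0.5, target_sizes=[image.size[::-1]]
    )[0]
    for score, label in zip(results["scores"], results["labels"]):
        print(model.config.id2label[label.item()], round(score.item(), 3))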
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip('/')
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, 'pytorch_model.bin'))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            # Embeddings, layer norms and the pooler are never pruned: copy as-is
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # Hard-concrete stretching interval used during L0 training
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError('Unknown pruning method')

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, 'pytorch_model.bin'))
    print('\nPruned model saved! See you later!')
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
parser.add_argument(
'--threshold',
type=float,
required=False,
help=(
'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'
'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'
'Not needed for `l0`'
),
)
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
help='Folder containing the model that was previously fine-pruned',
)
lowerCAmelCase__ = parser.parse_args()
main(args)
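# Example invocation (sketch; the script filename is illustrative, the flags are the
# ones defined above):
#
#   python bertarize.py \
#       --pruning_method sigmoied_threshold \
#       --threshold 0.1 \
#       --model_name_or_path ./serialization_dir/fine_pruned_model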
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-m',
        '--pretrained_model_name_or_path',
        type=str,
        default=None,
        required=True,
        help='Path to pretrained model or model identifier from huggingface.co/models.',
    )
    parser.add_argument(
        '-c',
        '--caption',
        type=str,
        default='robotic cat with wings',
        help='Text used to generate images.',
    )
    parser.add_argument(
        '-n',
        '--images_num',
        type=int,
        default=4,
        help='How many images to generate.',
    )
    parser.add_argument(
        '-s',
        '--seed',
        type=int,
        default=42,
        help='Seed for random process.',
    )
    parser.add_argument(
        '-ci',
        '--cuda_id',
        type=int,
        default=0,
        help='cuda_id.',
    )
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    # Paste `rows * cols` equally sized images into a single grid image
    if len(imgs) != rows * cols:
        raise ValueError('The specified number of rows and columns are not correct.')
    w, h = imgs[0].size
    grid = Image.new('RGB', size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(
    pipeline,
    prompt='robotic cat with wings',
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# Disable the safety checker so generated images are returned unfiltered
pipeline.safety_checker = lambda images, clip_input: (images, False)

if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
    # Prefer the UNet optimized by Intel Neural Compressor, if one was saved
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, 'unet', unet)
else:
    unet = unet.to(torch.device('cuda', args.cuda_id))
    pipeline = pipeline.to(unet.device)

grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
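# Example invocation (sketch; the script filename is illustrative). If a
# Neural-Compressor `best_model.pt` is present in the model folder it is loaded in
# place of the FP32 UNet:
#
#   python text2images.py -m ./sd-output-dir -c "robotic cat with wings" -n 4 -s 42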
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_snake_case = {'''tokenization_bertweet''': ['''BertweetTokenizer''']}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
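# Usage sketch: `_LazyModule` defers the heavy import until first attribute access,
# so `tokenization_bertweet` is only loaded when the class is actually touched.
# The checkpoint name below is the standard public BERTweet checkpoint.
#
#   from transformers import BertweetTokenizer
#   tokenizer = BertweetTokenizer.from_pretrained("vinai/bertweet-base")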
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False ):
'''simple docstring'''
lowerCamelCase : Tuple = "backbone." if is_semantic else ""
lowerCamelCase : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""{prefix}blocks.{i}.norm1.weight""", f"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm1.bias""", f"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.weight""", f"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.bias""", f"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.weight""", f"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.bias""", f"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.weight""", f"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.bias""", f"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.weight""", f"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.bias""", f"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(f"""{prefix}cls_token""", "beit.embeddings.cls_token"),
(f"""{prefix}patch_embed.proj.weight""", "beit.embeddings.patch_embeddings.projection.weight"),
(f"""{prefix}patch_embed.proj.bias""", "beit.embeddings.patch_embeddings.projection.bias"),
(f"""{prefix}pos_embed""", "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
lowerCamelCase : Optional[Any] = "backbone." if is_semantic else ""
# queries, keys and values
lowerCamelCase : Optional[Any] = state_dict.pop(f"""{prefix}blocks.{i}.attn.qkv.weight""" )
lowerCamelCase : Optional[Any] = state_dict.pop(f"""{prefix}blocks.{i}.attn.q_bias""" )
lowerCamelCase : Tuple = state_dict.pop(f"""{prefix}blocks.{i}.attn.v_bias""" )
lowerCamelCase : str = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase : Any = q_bias
lowerCamelCase : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase : Optional[int] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase : int = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
lowerCamelCase : Any = state_dict.pop(f"""{prefix}blocks.{i}.gamma_1""" )
lowerCamelCase : Any = state_dict.pop(f"""{prefix}blocks.{i}.gamma_2""" )
lowerCamelCase : int = gamma_a
lowerCamelCase : Optional[Any] = gamma_a
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : Optional[Any] = dct.pop(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : List[Any] = val
def lowercase_( ):
'''simple docstring'''
lowerCamelCase : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase : Optional[Any] = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ):
'''simple docstring'''
lowerCamelCase : List[Any] = False if "rvlcdip" in checkpoint_url else True
lowerCamelCase : str = BeitConfig(use_absolute_position_embeddings=SCREAMING_SNAKE_CASE_ , use_mask_token=SCREAMING_SNAKE_CASE_ )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
lowerCamelCase : Union[str, Any] = 1024
lowerCamelCase : Any = 4096
lowerCamelCase : str = 24
lowerCamelCase : List[Any] = 16
# labels
if "rvlcdip" in checkpoint_url:
lowerCamelCase : Optional[Any] = 16
lowerCamelCase : Tuple = "huggingface/label-files"
lowerCamelCase : List[str] = "rvlcdip-id2label.json"
lowerCamelCase : str = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) )
lowerCamelCase : Any = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
lowerCamelCase : Tuple = idalabel
lowerCamelCase : Dict = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
lowerCamelCase : int = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location="cpu" )["model"]
lowerCamelCase : Tuple = create_rename_keys(SCREAMING_SNAKE_CASE_ , has_lm_head=SCREAMING_SNAKE_CASE_ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
read_in_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , has_lm_head=SCREAMING_SNAKE_CASE_ )
# load HuggingFace model
lowerCamelCase : List[Any] = BeitForMaskedImageModeling(SCREAMING_SNAKE_CASE_ ) if has_lm_head else BeitForImageClassification(SCREAMING_SNAKE_CASE_ )
model.eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check outputs on an image
lowerCamelCase : str = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Any = prepare_img()
lowerCamelCase : Optional[int] = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="pt" )
lowerCamelCase : Optional[Any] = encoding["pixel_values"]
lowerCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Dict = outputs.logits
# verify logits
lowerCamelCase : List[Any] = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(SCREAMING_SNAKE_CASE_ ), "Shape of logits not as expected"
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
if has_lm_head:
lowerCamelCase : Optional[Any] = "dit-base" if "base" in checkpoint_url else "dit-large"
else:
lowerCamelCase : Dict = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
image_processor.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=SCREAMING_SNAKE_CASE_ , )
model.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=SCREAMING_SNAKE_CASE_ , )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
_snake_case = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
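# Minimal usage sketch (illustrative; the local path is a placeholder): a converted
# RVL-CDIP classifier can be reloaded and queried like any BEiT image classifier.


def _example_run_converted_dit():
    import requests
    from PIL import Image
    from transformers import BeitForImageClassification, BeitImageProcessor

    processor = BeitImageProcessor.from_pretrained("path/to/pytorch_dump_folder")
    model = BeitForImageClassification.from_pretrained("path/to/pytorch_dump_folder")
    image = Image.open(
        requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
    )
    logits = model(**processor(images=image, return_tensors="pt")).logits
    print(model.config.id2label[logits.argmax(-1).item()])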
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive form: gcd(a, b) = a if b == 0 else gcd(b, a mod b)."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
'''simple docstring'''
print(F'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(F'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(F'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(F'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(F'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(F'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(F'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(F'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
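# Quick self-check (sketch): both implementations should agree with the standard
# library's `math.gcd` on small inputs.


def _check_against_math_gcd():
    import math

    for a in range(1, 50):
        for b in range(1, 50):
            assert euclidean_gcd(a, b) == math.gcd(a, b)
            assert euclidean_gcd_recursive(a, b) == math.gcd(a, b)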
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
snake_case : Union[str, Any] = logging.get_logger(__name__)
snake_case : Optional[int] = ["model.decoder.embed_positions.weights"]
def lowerCAmelCase_ ( _snake_case : List[str] ) -> Optional[Any]:
'''simple docstring'''
if "emb" in name:
__magic_name__ : Optional[Any] = name.replace("emb" , "model.decoder.embed_tokens" )
if "transformer" in name:
__magic_name__ : List[str] = name.replace("transformer" , "model.decoder" )
if "cross_attention" in name:
__magic_name__ : Dict = name.replace("cross_attention" , "encoder_attn" )
if "linear1" in name:
__magic_name__ : Optional[Any] = name.replace("linear1" , "fc1" )
if "linear2" in name:
__magic_name__ : List[str] = name.replace("linear2" , "fc2" )
if "norm1" in name:
__magic_name__ : Optional[int] = name.replace("norm1" , "self_attn_layer_norm" )
if "norm_cross" in name:
__magic_name__ : Union[str, Any] = name.replace("norm_cross" , "encoder_attn_layer_norm" )
if "norm2" in name:
__magic_name__ : Any = name.replace("norm2" , "final_layer_norm" )
if "out_norm" in name:
__magic_name__ : Union[str, Any] = name.replace("out_norm" , "model.decoder.layer_norm" )
if "linears" in name:
__magic_name__ : Optional[Any] = name.replace("linears" , "lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
__magic_name__ : Any = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
return name
def lowerCAmelCase_ ( _snake_case : OrderedDict , _snake_case : int ) -> Tuple[Dict, Dict]:
'''simple docstring'''
__magic_name__ : int = list(state_dict.keys() )
__magic_name__ : Dict = {}
for key in keys:
__magic_name__ : Any = state_dict.pop(_snake_case )
__magic_name__ : Optional[Any] = rename_keys(_snake_case )
if "in_proj_weight" in key:
# split fused qkv proj
__magic_name__ : Optional[int] = val[:hidden_size, :]
__magic_name__ : List[str] = val[hidden_size : 2 * hidden_size, :]
__magic_name__ : List[str] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
__magic_name__ : int = val
else:
__magic_name__ : str = val
return state_dict, enc_dec_proj_state_dict
def lowerCAmelCase_ ( _snake_case : str ) -> MusicgenDecoderConfig:
'''simple docstring'''
if checkpoint == "small":
# default config values
__magic_name__ : Tuple = 1024
__magic_name__ : List[str] = 24
__magic_name__ : str = 16
elif checkpoint == "medium":
__magic_name__ : Optional[int] = 1536
__magic_name__ : Dict = 48
__magic_name__ : List[Any] = 24
elif checkpoint == "large":
__magic_name__ : Any = 2048
__magic_name__ : int = 48
__magic_name__ : str = 32
else:
raise ValueError(F'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
__magic_name__ : str = MusicgenDecoderConfig(
hidden_size=_snake_case , ffn_dim=hidden_size * 4 , num_hidden_layers=_snake_case , num_attention_heads=_snake_case , )
return config
@torch.no_grad()
def lowerCAmelCase_ ( _snake_case : Optional[Any] , _snake_case : Union[str, Any]=None , _snake_case : List[str]=None , _snake_case : Optional[Any]="cpu" ) -> List[str]:
'''simple docstring'''
__magic_name__ : Dict = MusicGen.get_pretrained(_snake_case , device=_snake_case )
__magic_name__ : Any = decoder_config_from_checkpoint(_snake_case )
__magic_name__ : Any = fairseq_model.lm.state_dict()
__magic_name__ , __magic_name__ : Optional[Any] = rename_state_dict(
_snake_case , hidden_size=decoder_config.hidden_size )
__magic_name__ : str = TaEncoderModel.from_pretrained("t5-base" )
__magic_name__ : Any = EncodecModel.from_pretrained("facebook/encodec_32khz" )
__magic_name__ : int = MusicgenForCausalLM(_snake_case ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
__magic_name__ , __magic_name__ : List[str] = decoder.load_state_dict(_snake_case , strict=_snake_case )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(_snake_case )
if len(_snake_case ) > 0:
raise ValueError(F'''Missing key(s) in state_dict: {missing_keys}''' )
if len(_snake_case ) > 0:
raise ValueError(F'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
__magic_name__ : Optional[Any] = MusicgenForConditionalGeneration(text_encoder=_snake_case , audio_encoder=_snake_case , decoder=_snake_case )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(_snake_case )
# check we can do a forward pass
__magic_name__ : Optional[Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
__magic_name__ : List[Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
__magic_name__ : Dict = model(input_ids=_snake_case , decoder_input_ids=_snake_case ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
__magic_name__ : Optional[Any] = AutoTokenizer.from_pretrained("t5-base" )
__magic_name__ : List[str] = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
__magic_name__ : Union[str, Any] = MusicgenProcessor(feature_extractor=_snake_case , tokenizer=_snake_case )
# set the appropriate bos/pad token ids
__magic_name__ : List[str] = 2048
__magic_name__ : List[str] = 2048
# set other default generation config params
__magic_name__ : Union[str, Any] = int(30 * audio_encoder.config.frame_rate )
__magic_name__ : Optional[Any] = True
__magic_name__ : Dict = 3.0
if pytorch_dump_folder is not None:
Path(_snake_case ).mkdir(exist_ok=_snake_case )
logger.info(F'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(_snake_case )
processor.save_pretrained(_snake_case )
if repo_id:
logger.info(F'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(_snake_case )
processor.push_to_hub(_snake_case )
if __name__ == "__main__":
snake_case : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
snake_case : Optional[Any] = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
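# Minimal usage sketch (not part of the conversion script; the local path is a
# placeholder): text-conditional audio generation with the converted checkpoint,
# following the standard MusicGen generation API.


def _example_generate_audio():
    from transformers import AutoProcessor, MusicgenForConditionalGeneration

    processor = AutoProcessor.from_pretrained("path/to/pytorch_dump_folder")
    model = MusicgenForConditionalGeneration.from_pretrained("path/to/pytorch_dump_folder")
    inputs = processor(text=["80s pop track with bassy drums and synth"], padding=True, return_tensors="pt")
    # 256 new tokens is roughly 5 seconds of audio at the 50 Hz frame rate
    audio_values = model.generate(**inputs, do_sample=True, guidance_scale=3.0, max_new_tokens=256)
    print(audio_values.shape)  # (batch, channels, samples)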
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area between `fnc` and the x axis on [x_start, x_end]
    using `steps` trapezoids."""
    xa = x_start
    fxa = fnc(xa)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        xa2 = xa + (x_end - x_start) / steps
        fxa2 = fnc(xa2)
        area += abs(fxa2 + fxa) * (xa2 - xa) / 2
        # Increment step
        xa = xa2
        fxa = fxa2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
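# Sanity check: analytically, with F(x) = x^4/4 + x^3/3, the area between
# f(x) = x^3 + x^2 and the x axis on [-5, 5] is |F(-1) - F(-5)| + (F(5) - F(-1))
# = 1376/12 + 198 = 938/3 ≈ 312.67, which the printed values approach as `i` grows.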
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # An intermediate highway decided to exit early during inference
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
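# Minimal usage sketch (illustrative; `input_ids` and `labels` stand in for a real
# batch): the early-exit behaviour is driven by the extra `output_layer` and
# `train_highway` arguments of `forward`.
#
#   config = RobertaConfig.from_pretrained("roberta-base", num_labels=2)
#   model = DeeRobertaForSequenceClassification(config)
#   outputs = model(input_ids, labels=labels, output_layer=6)  # read logits from highway exit 6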
'''simple docstring'''
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
_SCREAMING_SNAKE_CASE = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt''']
_SCREAMING_SNAKE_CASE = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('''0.9.0'''):
raise Exception('''requires fairseq >= 0.9.0''')
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = ''' Hello world! cécé herlolip'''
_SCREAMING_SNAKE_CASE = [
('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def _lowerCAmelCase ( lowerCamelCase_ : int ):
__lowercase = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
]
for k in ignore_keys:
state_dict.pop(lowerCamelCase_ , lowerCamelCase_ )
def _lowerCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : Union[str, Any] ):
__lowercase = dct.pop(lowerCamelCase_ )
__lowercase = val
def _lowerCAmelCase ( lowerCamelCase_ : Any ):
__lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )
__lowercase = torch.hub.load('''pytorch/fairseq''' , '''bart.large.cnn''' ).eval()
hub_interface.model.load_state_dict(sd['''model'''] )
return hub_interface
def _lowerCAmelCase ( lowerCamelCase_ : List[str] ):
__lowercase , __lowercase = emb.weight.shape
__lowercase = nn.Linear(lowerCamelCase_ , lowerCamelCase_ , bias=lowerCamelCase_ )
__lowercase = emb.weight.data
return lin_layer
@torch.no_grad()
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any]=None ):
if not os.path.exists(lowerCamelCase_ ):
__lowercase = torch.hub.load('''pytorch/fairseq''' , lowerCamelCase_ ).eval()
else:
__lowercase = load_xsum_checkpoint(lowerCamelCase_ )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
__lowercase = checkpoint_path.replace('''.''' , '''-''' )
__lowercase = BartConfig.from_pretrained(lowerCamelCase_ )
__lowercase = bart.encode(lowerCamelCase_ ).unsqueeze(0 )
__lowercase = BartTokenizer.from_pretrained(lowerCamelCase_ ).encode(lowerCamelCase_ , return_tensors='''pt''' ).unsqueeze(0 )
if not torch.eq(lowerCamelCase_ , lowerCamelCase_ ).all():
raise ValueError(
f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}" )
if checkpoint_path == "bart.large.mnli":
__lowercase = bart.state_dict()
remove_ignore_keys_(lowerCamelCase_ )
__lowercase = state_dict['''model.decoder.embed_tokens.weight''']
for src, dest in mnli_rename_keys:
rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
__lowercase = BartForSequenceClassification(lowerCamelCase_ ).eval()
model.load_state_dict(lowerCamelCase_ )
__lowercase = bart.predict('''mnli''' , lowerCamelCase_ , return_logits=lowerCamelCase_ )
__lowercase = model(lowerCamelCase_ )[0] # logits
else: # no classification heads to worry about
__lowercase = bart.model.state_dict()
remove_ignore_keys_(lowerCamelCase_ )
__lowercase = state_dict['''decoder.embed_tokens.weight''']
__lowercase = bart.extract_features(lowerCamelCase_ )
if hf_checkpoint_name == "facebook/bart-large":
__lowercase = BartModel(lowerCamelCase_ ).eval()
model.load_state_dict(lowerCamelCase_ )
__lowercase = model(lowerCamelCase_ ).model[0]
else:
__lowercase = BartForConditionalGeneration(lowerCamelCase_ ).eval() # an existing summarization ckpt
model.model.load_state_dict(lowerCamelCase_ )
if hasattr(lowerCamelCase_ , '''lm_head''' ):
__lowercase = make_linear_from_emb(model.model.shared )
__lowercase = model.model(lowerCamelCase_ )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}" )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError('''Some values in `fairseq_output` are different from `new_model_outputs`''' )
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
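# Minimal usage sketch (not part of the conversion script; the local path is a
# placeholder, the input text is illustrative): summarization with a converted
# CNN/XSum checkpoint.


def _example_summarize():
    from transformers import BartForConditionalGeneration, BartTokenizer

    tokenizer = BartTokenizer.from_pretrained("path/to/pytorch_dump_folder")
    model = BartForConditionalGeneration.from_pretrained("path/to/pytorch_dump_folder")
    inputs = tokenizer(["PG&E stated it scheduled the blackouts in response to forecasts."], return_tensors="pt")
    summary_ids = model.generate(inputs["input_ids"], num_beams=4, max_length=60)
    print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0])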
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Union[List[PIL.Image.Image], np.ndarray]
a : Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
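# Usage sketch (checkpoint name illustrative): the pipeline reuses standard Stable
# Diffusion weights and adds semantic guidance through the `editing_prompt` argument.
#
#   pipe = SemanticStableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   out = pipe(prompt="a photo of a cat", editing_prompt=["smiling"])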
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer" )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map )
        return BloomTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def test_encodings_from_sample_data(self):
        tokenizer = self.get_rust_tokenizer()
        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES )["input_ids"]
        self.assertListEqual(TARGET_TOKENS , computed_tokens )
        decoded_tokens = tokenizer.batch_decode(computed_tokens )
        self.assertListEqual(decoded_tokens , INPUT_SENTENCES )
    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(s , max_length=max_length )
                    tokenizer_r.encode_plus(s , max_length=max_length )
                    tokenizer_r.batch_encode_plus(s2 , max_length=max_length )
                    tokenizer_r.encode(p , max_length=max_length )
                    tokenizer_r.batch_encode_plus(p2 , max_length=max_length )
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding" )
                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding="max_length" )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding="max_length" )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding="max_length" , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding="max_length" )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding="max_length" )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding="max_length" , )
    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli" , "all_languages" , split="test" , streaming=True )
        sample_data = next(iter(ds ) )["premise"]  # pick one sample
        list_of_strings = list(sample_data.values() )
        output_tokens = list(map(tokenizer.encode , list_of_strings ) )
        predicted_text = [tokenizer.decode(x , clean_up_tokenization_spaces=False ) for x in output_tokens]
        self.assertListEqual(predicted_text , list_of_strings )
    def test_pretrained_model_lists(self):
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings that do not have
        # any sequence length constraints. The parent class's test would fail since it relies on the
        # maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 157
|
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    # Resolve a force given as (magnitude, angle) into its x/y components.
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]
def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    # Check whether the net moment of the force system about the origin is (approximately) zero.
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
# Test to check if it works
    forces = array(
[
polar_force(7_1_8.4, 180 - 30),
polar_force(8_7_9.5_4, 45),
polar_force(100, -90),
]
)
    location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
    forces = array(
[
polar_force(30 * 9.8_1, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
    location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 157
| 1
|
"""simple docstring"""
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        '''Calculate y[n] from x[n]; concrete filters override this.'''
        return 0.0
def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
    return lowest, highest
def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs ) )
    fft_db = 20 * np.log10(fft_out )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel("""Frequency (Hz)""" )
    plt.xscale("""log""" )
    # Display within reasonable bounds
    bounds = get_bounds(fft_db , samplerate )
    plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
    plt.ylabel("""Gain (dB)""" )
    plt.plot(fft_db )
    plt.show()
def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phase = np.angle(np.fft.fft(outputs ) )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel("""Frequency (Hz)""" )
    plt.xscale("""log""" )
    plt.ylim(-2 * pi , 2 * pi )
    plt.ylabel("""Phase shift (Radians)""" )
    plt.plot(np.unwrap(phase , -2 * pi ) )
    plt.show()
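# A minimal end-to-end sketch (not part of the original module): any object exposing a
# process(sample) -> float method satisfies the FilterType protocol above. The toy
# first-order FIR filter below (y[n] = 0.7*x[n] + 0.3*x[n-1]) has a gentle low-pass response.
class SimpleFirFilter:
    def __init__(self) -> None:
        self.prev_sample = 0.0
    def process(self, sample: float) -> float:
        out = 0.7 * sample + 0.3 * self.prev_sample
        self.prev_sample = sample
        return out
if __name__ == "__main__":
    show_frequency_response(SimpleFirFilter() , 48000 )
    show_phase_response(SimpleFirFilter() , 48000 )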
| 370
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
A: Optional[Any] = logging.get_logger(__name__)
A: Optional[int] = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = '''layoutlmv3'''
def __init__( self , _SCREAMING_SNAKE_CASE=50265 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-5 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=128 , _SCREAMING_SNAKE_CASE=128 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=128 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=224 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ) -> List[Any]:
'''simple docstring'''
super().__init__(
vocab_size=_SCREAMING_SNAKE_CASE , hidden_size=_SCREAMING_SNAKE_CASE , num_hidden_layers=_SCREAMING_SNAKE_CASE , num_attention_heads=_SCREAMING_SNAKE_CASE , intermediate_size=_SCREAMING_SNAKE_CASE , hidden_act=_SCREAMING_SNAKE_CASE , hidden_dropout_prob=_SCREAMING_SNAKE_CASE , attention_probs_dropout_prob=_SCREAMING_SNAKE_CASE , max_position_embeddings=_SCREAMING_SNAKE_CASE , type_vocab_size=_SCREAMING_SNAKE_CASE , initializer_range=_SCREAMING_SNAKE_CASE , layer_norm_eps=_SCREAMING_SNAKE_CASE , pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : List[str] = max_ad_position_embeddings
UpperCAmelCase : List[Any] = coordinate_size
UpperCAmelCase : List[Any] = shape_size
UpperCAmelCase : Any = has_relative_attention_bias
UpperCAmelCase : Optional[Any] = rel_pos_bins
UpperCAmelCase : int = max_rel_pos
UpperCAmelCase : int = has_spatial_attention_bias
UpperCAmelCase : Optional[int] = rel_ad_pos_bins
UpperCAmelCase : str = max_rel_ad_pos
UpperCAmelCase : List[Any] = text_embed
UpperCAmelCase : Tuple = visual_embed
UpperCAmelCase : List[Any] = input_size
UpperCAmelCase : Union[str, Any] = num_channels
UpperCAmelCase : Dict = patch_size
UpperCAmelCase : Dict = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.12' )
@property
def SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def SCREAMING_SNAKE_CASE ( self ) -> float:
'''simple docstring'''
return 1E-5
@property
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
return 12
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 3 , _SCREAMING_SNAKE_CASE = 40 , _SCREAMING_SNAKE_CASE = 40 , ) -> Mapping[str, Any]:
'''simple docstring'''
setattr(processor.image_processor , """apply_ocr""" , _SCREAMING_SNAKE_CASE )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase : str = compute_effective_axis_dimension(
_SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase : Any = processor.tokenizer.num_special_tokens_to_add(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = compute_effective_axis_dimension(
_SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_SCREAMING_SNAKE_CASE )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase : Union[str, Any] = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
UpperCAmelCase : Optional[Any] = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
UpperCAmelCase : Tuple = self._generate_dummy_images(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = dict(
processor(
_SCREAMING_SNAKE_CASE , text=_SCREAMING_SNAKE_CASE , boxes=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , ) )
return inputs
| 76
| 0
|
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        '''simple docstring'''
        @staticmethod
        def open(*args, **kwargs):
            """simple docstring"""
            pass
def hashimage(image: Image) -> str:
    """simple docstring"""
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class A (unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Tuple = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def a_ ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] ) -> List[str]:
"""simple docstring"""
A__ = DepthEstimationPipeline(model=__lowerCAmelCase , image_processor=__lowerCAmelCase )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def a_ ( self : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ) -> str:
"""simple docstring"""
A__ = depth_estimator("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
self.assertEqual({"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )} , __lowerCAmelCase )
import datasets
A__ = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
A__ = depth_estimator(
[
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
] )
self.assertEqual(
[
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
] , __lowerCAmelCase , )
@require_tf
@unittest.skip("""Depth estimation is not implemented in TF""" )
def a_ ( self : Any ) -> List[str]:
"""simple docstring"""
pass
@slow
@require_torch
def a_ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
A__ = """Intel/dpt-large"""
A__ = pipeline("""depth-estimation""" , model=__lowerCAmelCase )
A__ = depth_estimator("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
A__ = hashimage(outputs["""depth"""] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) , 2_9.3_0_4 )
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) , 2.6_6_2 )
@require_torch
def a_ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
        self.skipTest("""There is no hf-internal-testing tiny model for either GLPN or DPT""" )
| 274
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 5_1_2,
'''albert-large-v1''': 5_1_2,
'''albert-xlarge-v1''': 5_1_2,
'''albert-xxlarge-v1''': 5_1_2,
'''albert-base-v2''': 5_1_2,
'''albert-large-v2''': 5_1_2,
'''albert-xlarge-v2''': 5_1_2,
'''albert-xxlarge-v2''': 5_1_2,
}
SPIECE_UNDERLINE = '''▁'''
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs, ):
        """simple docstring"""
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
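# Illustrative usage (a sketch; downloads the pretrained fast tokenizer from the Hub):
#   tokenizer = AlbertTokenizerFast.from_pretrained('''albert-base-v2''')
#   print(tokenizer('''Hello world''').input_ids)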
| 274
| 1
|
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image() -> Image.Image:
    url = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
    image = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    return image
def create_rename_keys(config) -> list:
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias" )
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias" )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
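# Illustrative sketch (hypothetical helper, not part of the conversion script): the fused
# qkv bias is laid out as [q_bias | k_bias | v_bias], and BLIP-2's ViT has no bias on the
# key projection, which is why zeros fill the middle third above.
def _demo_qkv_bias_layout(hidden_size: int = 4) -> None:
    q_bias = torch.arange(float(hidden_size ) )
    v_bias = torch.ones(hidden_size )
    qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias ), v_bias) )
    assert qkv_bias.shape == (3 * hidden_size,)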
def get_blipa_config(model_name, eos_token_id):
    image_size = 364 if """coco""" in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
_lowerCAmelCase : Tuple = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
_lowerCAmelCase : Dict = tokenizer("""\n""" ,add_special_tokens=_lowerCamelCase ).input_ids[0]
_lowerCAmelCase , _lowerCAmelCase : List[str] = get_blipa_config(_lowerCamelCase ,eos_token_id=_lowerCamelCase )
_lowerCAmelCase : int = BlipaForConditionalGeneration(_lowerCamelCase ).eval()
_lowerCAmelCase : Tuple = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
_lowerCAmelCase : Optional[Any] = """cuda""" if torch.cuda.is_available() else """cpu"""
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : int = load_model_and_preprocess(
name=_lowerCamelCase ,model_type=_lowerCamelCase ,is_eval=_lowerCamelCase ,device=_lowerCamelCase )
original_model.eval()
print("""Done!""" )
# update state dict keys
_lowerCAmelCase : str = original_model.state_dict()
_lowerCAmelCase : Optional[int] = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_lowerCAmelCase : Any = state_dict.pop(_lowerCamelCase )
if key.startswith("""Qformer.bert""" ):
_lowerCAmelCase : Optional[Any] = key.replace("""Qformer.bert""" ,"""qformer""" )
if "attention.self" in key:
_lowerCAmelCase : Union[str, Any] = key.replace("""self""" ,"""attention""" )
if "opt_proj" in key:
_lowerCAmelCase : str = key.replace("""opt_proj""" ,"""language_projection""" )
if "t5_proj" in key:
_lowerCAmelCase : Union[str, Any] = key.replace("""t5_proj""" ,"""language_projection""" )
if key.startswith("""opt""" ):
_lowerCAmelCase : Optional[int] = key.replace("""opt""" ,"""language""" )
if key.startswith("""t5""" ):
_lowerCAmelCase : List[str] = key.replace("""t5""" ,"""language""" )
_lowerCAmelCase : List[Any] = val
# read in qv biases
read_in_q_v_bias(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = hf_model.load_state_dict(_lowerCamelCase ,strict=_lowerCamelCase )
assert len(_lowerCamelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
_lowerCAmelCase : Optional[Any] = load_demo_image()
_lowerCAmelCase : List[str] = vis_processors["""eval"""](_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase )
_lowerCAmelCase : Dict = tokenizer(["""\n"""] ,return_tensors="""pt""" ).input_ids.to(_lowerCamelCase )
# create processor
_lowerCAmelCase : Union[str, Any] = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} ,image_mean=_lowerCamelCase ,image_std=_lowerCamelCase )
_lowerCAmelCase : List[str] = BlipaProcessor(image_processor=_lowerCamelCase ,tokenizer=_lowerCamelCase )
_lowerCAmelCase : Tuple = processor(images=_lowerCamelCase ,return_tensors="""pt""" ).pixel_values.to(_lowerCamelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(_lowerCamelCase ,_lowerCamelCase )
original_model.to(_lowerCamelCase )
hf_model.to(_lowerCamelCase )
with torch.no_grad():
if "opt" in model_name:
_lowerCAmelCase : List[str] = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
_lowerCAmelCase : List[Any] = hf_model(_lowerCamelCase ,_lowerCamelCase ).logits
else:
_lowerCAmelCase : List[Any] = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
_lowerCAmelCase : Dict = input_ids.masked_fill(input_ids == tokenizer.pad_token_id ,-100 )
_lowerCAmelCase : Any = hf_model(_lowerCamelCase ,_lowerCamelCase ,labels=_lowerCamelCase ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" ,original_logits[0, :3, :3] )
print("""First values of HF logits:""" ,logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
_lowerCAmelCase : Optional[Any] = torch.tensor(
[[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] ,device=_lowerCamelCase )
assert torch.allclose(logits[0, :3, :3] ,_lowerCamelCase ,atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
_lowerCAmelCase : List[str] = torch.tensor(
[[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] ,device=_lowerCamelCase )
else:
# cast to same type
_lowerCAmelCase : Optional[int] = logits.dtype
assert torch.allclose(original_logits.to(_lowerCamelCase ) ,_lowerCamelCase ,atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
_lowerCAmelCase : Optional[int] = """"""
_lowerCAmelCase : Union[str, Any] = tokenizer(_lowerCamelCase ,return_tensors="""pt""" ).input_ids.to(_lowerCamelCase )
_lowerCAmelCase : Dict = original_model.generate({"""image""": original_pixel_values} )
_lowerCAmelCase : Dict = hf_model.generate(
_lowerCamelCase ,_lowerCamelCase ,do_sample=_lowerCamelCase ,num_beams=5 ,max_length=30 ,min_length=1 ,top_p=0.9 ,repetition_penalty=1.0 ,length_penalty=1.0 ,temperature=1 ,)
print("""Original generation:""" ,_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = input_ids.shape[1]
_lowerCAmelCase : Dict = processor.batch_decode(outputs[:, prompt_length:] ,skip_special_tokens=_lowerCamelCase )
_lowerCAmelCase : Dict = [text.strip() for text in output_text]
print("""HF generation:""" ,_lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if push_to_hub:
processor.push_to_hub(f"nielsr/{model_name}" )
hf_model.push_to_hub(f"nielsr/{model_name}" )
if __name__ == "__main__":
_a : Dict = argparse.ArgumentParser()
_a : List[Any] = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
_a : Tuple = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 126
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512, pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [R"pooler", R"logit_scale"]
    _keys_to_ignore_on_load_missing = [R"position_ids", R"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig
    def __init__(self, config):
        super().__init__(config )
        self.roberta = XLMRobertaModel(config )
        self.transformation = nn.Linear(config.hidden_size , config.project_dim )
        self.has_pre_transformation = getattr(config , """has_pre_transformation""" , False )
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size , config.project_dim )
            self.pre_LN = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
        self.post_init()
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None, ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_attentions=output_attentions , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=return_dict , )
        if self.has_pre_transformation:
            sequence_output2 = outputs["""hidden_states"""][-2]
            sequence_output2 = self.pre_LN(sequence_output2 )
            projection_state2 = self.transformation_pre(sequence_output2 )
            return TransformationModelOutput(
                projection_state=projection_state2 , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
        else:
            projection_state = self.transformation(outputs.last_hidden_state )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
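# Illustrative usage (a sketch; the tiny config values are placeholders and the weights
# are randomly initialized, not pretrained):
#   config = RobertaSeriesConfig(vocab_size=1000, hidden_size=32, num_hidden_layers=1,
#                                num_attention_heads=2, intermediate_size=64, project_dim=16)
#   model = RobertaSeriesModelWithTransformation(config)
#   out = model(input_ids=torch.tensor([[5, 6, 7]]))
#   print(out.projection_state.shape)  # torch.Size([1, 3, 16])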
| 126
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''tiiuae/falcon-40b''': '''https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json''',
'''tiiuae/falcon-7b''': '''https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json''',
}
class FalconConfig(PretrainedConfig):
    model_type = '''falcon'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    def __init__(self, vocab_size=65024, hidden_size=4544, num_hidden_layers=32, num_attention_heads=71, layer_norm_epsilon=1E-5, initializer_range=0.02, use_cache=True, hidden_dropout=0.0, attention_dropout=0.0, num_kv_heads=None, alibi=False, new_decoder_architecture=False, multi_query=True, parallel_attn=True, bias=False, bos_token_id=11, eos_token_id=11, **kwargs, ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('n_embed' , None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads
    @property
    def rotary(self):
        return not self.alibi
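if __name__ == "__main__":
    # Illustrative sanity check (a sketch, not part of the original file): the defaults
    # describe a Falcon-7B-style model with multi-query attention and rotary embeddings.
    cfg = FalconConfig()
    print(cfg.head_dim)  # 4544 // 71 == 64
    print(cfg.rotary)    # True, because alibi defaults to False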
| 89
|
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    '''vocab_file''': '''vocab.json''',
    '''merges_file''': '''merges.txt''',
}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'''},
    '''merges_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'''},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''ctrl''': 256,
}
CONTROL_CODES = {
'''Pregnancy''': 16_8629,
'''Christianity''': 7675,
'''Explain''': 10_6423,
'''Fitness''': 6_3440,
'''Saving''': 6_3163,
'''Ask''': 2_7171,
'''Ass''': 9_5985,
'''Joke''': 16_3509,
'''Questions''': 4_5622,
'''Thoughts''': 4_9605,
'''Retail''': 5_2342,
'''Feminism''': 16_4338,
'''Writing''': 1_1992,
'''Atheism''': 19_2263,
'''Netflix''': 4_8616,
'''Computing''': 3_9639,
'''Opinion''': 4_3213,
'''Alone''': 4_4967,
'''Funny''': 5_8917,
'''Gaming''': 4_0358,
'''Human''': 4088,
'''India''': 1331,
'''Joker''': 7_7138,
'''Diet''': 3_6206,
'''Legal''': 1_1859,
'''Norman''': 4939,
'''Tip''': 7_2689,
'''Weight''': 5_2343,
'''Movies''': 4_6273,
'''Running''': 2_3425,
'''Science''': 2090,
'''Horror''': 3_7793,
'''Confession''': 6_0572,
'''Finance''': 1_2250,
'''Politics''': 1_6360,
'''Scary''': 19_1985,
'''Support''': 1_2654,
'''Technologies''': 3_2516,
'''Teenage''': 6_6160,
'''Event''': 3_2769,
'''Learned''': 6_7460,
'''Notion''': 18_2770,
'''Wikipedia''': 3_7583,
'''Books''': 6665,
'''Extract''': 7_6050,
'''Confessions''': 10_2701,
'''Conspiracy''': 7_5932,
'''Links''': 6_3674,
'''Narcissus''': 15_0425,
'''Relationship''': 5_4766,
'''Relationships''': 13_4796,
'''Reviews''': 4_1671,
'''News''': 4256,
'''Translation''': 2_6820,
'''multilingual''': 12_8406,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
class CTRLTokenizer(PreTrainedTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase="<unk>" ,**__UpperCAmelCase ) -> Optional[int]:
super().__init__(unk_token=__UpperCAmelCase ,**__UpperCAmelCase )
with open(__UpperCAmelCase ,encoding="""utf-8""" ) as vocab_handle:
lowerCAmelCase__ : int = json.load(__UpperCAmelCase )
lowerCAmelCase__ : Any = {v: k for k, v in self.encoder.items()}
with open(__UpperCAmelCase ,encoding="""utf-8""" ) as merges_handle:
lowerCAmelCase__ : Union[str, Any] = merges_handle.read().split("""\n""" )[1:-1]
lowerCAmelCase__ : Optional[Any] = [tuple(merge.split() ) for merge in merges]
lowerCAmelCase__ : int = dict(zip(__UpperCAmelCase ,range(len(__UpperCAmelCase ) ) ) )
lowerCAmelCase__ : List[Any] = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder )
    def get_vocab(self) -> dict:
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        word = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair: self.bpe_ranks.get(pair , float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = """@@ """.join(word )
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(R"""\S+\n?""" , text )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(""" """ ) ) )
        return split_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token(self, index):
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string(self, tokens):
        out_string = """ """.join(tokens ).replace("""@@ """ , """""" ).strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix = None) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + """\n""" )
        index = 0
        with open(merge_file , """w""" , encoding="""utf-8""" ) as writer:
            writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        """ Please check that the tokenizer is not corrupted!""" )
                    index = token_index
                writer.write(""" """.join(bpe_tokens ) + """\n""" )
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
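# Illustrative usage (a sketch; requires local copies of the CTRL vocab/merges files):
#   tokenizer = CTRLTokenizer('''ctrl-vocab.json''', '''ctrl-merges.txt''')
#   print(tokenizer.control_codes['''Links'''])  # 63674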
| 37
| 0
|
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1E-1_2):
    norm_emb_1 = jnp.divide(emb_1.T , jnp.clip(jnp.linalg.norm(emb_1 , axis=1 ) , a_min=eps ) ).T
    norm_emb_2 = jnp.divide(emb_2.T , jnp.clip(jnp.linalg.norm(emb_2 , axis=1 ) , a_min=eps ) ).T
    return jnp.matmul(norm_emb_1 , norm_emb_2.T )
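def _demo_cosine_similarity() -> None:
    # Illustrative sketch (hypothetical helper, not part of the module): with row-wise
    # embeddings, the function above returns the full pairwise cosine-similarity matrix.
    emb_1 = jax.random.normal(jax.random.PRNGKey(0 ) , (2, 4) )
    emb_2 = jax.random.normal(jax.random.PRNGKey(1 ) , (3, 4) )
    sims = jax_cosine_distance(emb_1 , emb_2 )
    assert sims.shape == (2, 3)  # every entry lies in [-1, 1]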
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    '''simple docstring'''
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Tuple = FlaxCLIPVisionModule(self.config.vision_config )
UpperCamelCase_: List[str] = nn.Dense(self.config.projection_dim , use_bias=snake_case_ , dtype=self.dtype )
UpperCamelCase_: List[str] = self.param("""concept_embeds""" , jax.nn.initializers.ones , (17, self.config.projection_dim) )
UpperCamelCase_: Tuple = self.param(
"""special_care_embeds""" , jax.nn.initializers.ones , (3, self.config.projection_dim) )
UpperCamelCase_: List[str] = self.param("""concept_embeds_weights""" , jax.nn.initializers.ones , (17,) )
UpperCamelCase_: Dict = self.param("""special_care_embeds_weights""" , jax.nn.initializers.ones , (3,) )
def __call__( self : Any , snake_case_ : str ):
UpperCamelCase_: Any = self.vision_model(snake_case_ )[1]
UpperCamelCase_: Union[str, Any] = self.visual_projection(snake_case_ )
UpperCamelCase_: Dict = jax_cosine_distance(snake_case_ , self.special_care_embeds )
UpperCamelCase_: str = jax_cosine_distance(snake_case_ , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
UpperCamelCase_: Optional[Any] = 0.0
UpperCamelCase_: Tuple = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
UpperCamelCase_: Optional[int] = jnp.round(snake_case_ , 3 )
UpperCamelCase_: List[str] = jnp.any(special_scores > 0 , axis=1 , keepdims=snake_case_ )
# Use a lower threshold if an image has any special care concept
UpperCamelCase_: Dict = is_special_care * 0.01
UpperCamelCase_: Optional[Any] = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
UpperCamelCase_: List[Any] = jnp.round(snake_case_ , 3 )
UpperCamelCase_: List[str] = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    '''simple docstring'''
    config_class = CLIPConfig
    main_input_name = """clip_input"""
    module_class = FlaxStableDiffusionSafetyCheckerModule
def __init__( self : Dict , snake_case_ : CLIPConfig , snake_case_ : Optional[Tuple] = None , snake_case_ : int = 0 , snake_case_ : jnp.dtype = jnp.floataa , snake_case_ : bool = True , **snake_case_ : Optional[Any] , ):
if input_shape is None:
UpperCamelCase_: Union[str, Any] = (1, 224, 224, 3)
UpperCamelCase_: Optional[Any] = self.module_class(config=snake_case_ , dtype=snake_case_ , **snake_case_ )
super().__init__(snake_case_ , snake_case_ , input_shape=snake_case_ , seed=snake_case_ , dtype=snake_case_ , _do_init=_do_init )
def lowerCAmelCase__ ( self : str , snake_case_ : jax.random.KeyArray , snake_case_ : Tuple , snake_case_ : FrozenDict = None ):
# init input tensor
UpperCamelCase_: List[Any] = jax.random.normal(snake_case_ , snake_case_ )
UpperCamelCase_: Any = jax.random.split(snake_case_ )
UpperCamelCase_: Optional[int] = {"""params""": params_rng, """dropout""": dropout_rng}
UpperCamelCase_: Tuple = self.module.init(snake_case_ , snake_case_ )["""params"""]
return random_params
def __call__( self : str , snake_case_ : Dict , snake_case_ : dict = None , ):
UpperCamelCase_: Optional[int] = jnp.transpose(snake_case_ , (0, 2, 3, 1) )
return self.module.apply(
{"""params""": params or self.params} , jnp.array(snake_case_ , dtype=jnp.floataa ) , rngs={} , )
| 360
|
def text_justification(word: str, max_width: int) -> list:
    words = word.split()
    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line )
        if len(line ) == 1:
            # if there is only one word in the line
            # just insert overall_spaces_count for the remainder of the line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute the remaining spaces via round robin to the leftmost words
            for i in range(spaces_count_in_locations ):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words ):
                # add the word
                aligned_words_list.append(line[i] )
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * """ """ )
            # just add the last word to the sentence
            aligned_words_list.append(line[-1] )
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list )
    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word ) + len(line ) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word )
            width += len(word )
        else:
            # justify the line and add it to result
            answer.append(justify(line , width , max_width ) )
            # reset new line and new width
            line, width = [word], len(word )
    remaining_spaces = max_width - width - len(line )
    answer.append(" ".join(line ) + (remaining_spaces + 1) * " " )
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
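    # Illustrative run (a sketch; the first argument is the sentence string, the second
    # the line width):
    print(text_justification("This is an example of text justification.", 16))
    # -> ['This    is    an', 'example  of text', 'justification.  ']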
| 223
| 0
|
"""simple docstring"""
def rank_of_matrix(matrix) -> int:
    rows = len(matrix )
    columns = len(matrix[0] )
    rank = min(rows , columns )
    for row in range(rank ):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1 , rows ):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row , columns ):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1 , rows ):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows ):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
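    # Illustrative check (a sketch, not part of the original file): the second row is a
    # multiple of the first, so the rank is 1.
    print(rank_of_matrix([[1.0, 2.0], [2.0, 4.0]]))  # -> 1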
| 183
|
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    """Miller-Rabin probabilistic primality test using `prec` random bases."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d /= 2
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2 , n - 1 )
        b = bin_exp_mod(a , d , n )
        if b != 1:
            flag = True
            for _ in range(exp ):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
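    # Probabilistic sanity checks (illustrative, not in the original file): the Carmichael
    # number 561 fools the plain Fermat test but is rejected here with overwhelming probability.
    assert not is_prime_big(561)
    assert is_prime_big(97)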
| 230
| 0
|
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]

        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        seed_input = self.get_dummy_seed_input()

        with torch.no_grad():
            output = model(**seed_input)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        seed_input = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**seed_input)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
__A = "src/transformers"
# Matches is_xxx_available()
__A = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
__A = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__A = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
__A = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
__A = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__A = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
__A = re.compile("^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
__A = re.compile("^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
__A = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
__A = re.compile(r"^\s*try:")
# Catches a line with else:
__A = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None

    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
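

# For reference, `parse_init` returns a pair of {backend: [objects]} dicts
# (hypothetical shapes), e.g.:
#
#   ({"none": ["FNetConfig"], "torch": ["FNetModel"]},
#    {"none": ["FNetConfig"], "torch": ["FNetModel"]})
#
# where the first dict comes from `_import_structure` and the second from the
# TYPE_CHECKING block.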
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
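

# Tiny worked example (hypothetical dicts): an object listed under
# `_import_structure` but missing from the TYPE_CHECKING half is reported.
#
#   analyze_results({"none": ["A", "B"]}, {"none": ["A"]})
#   # -> ["Differences for base imports:",
#   #     "  B in _import_structure but not in TYPE_HINT."]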
def check_all_inits():
    """Check all inits in the repo and raise an error if at least one does not define the same objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
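

# Usage sketch (assuming the installed `transformers` package, where this
# class is exposed as `EncoderDecoderConfig`): pair two existing configs into
# a composite encoder-decoder config.
#
#   from transformers import BertConfig, EncoderDecoderConfig
#
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention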
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # temporarily pretend the feature extractor emits mel features so padding uses the right size
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
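

# Usage sketch (assuming the installed `transformers` package and the
# pretrained checkpoint "microsoft/speecht5_tts"):
#
#   from transformers import SpeechT5Processor
#
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   inputs = processor(text="it is sunny today", return_tensors="pt")
#   print(inputs["input_ids"].shape)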
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}

BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "


def get_pairs(word):
    """
    Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length
    strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}


class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )

        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")

            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]

            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # merge the lowest-ranked bigram first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = " ".join(tokens)

        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))

        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)

        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
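

# Usage sketch (assuming the installed `transformers` package and the
# pretrained checkpoint "facebook/s2t-wav2vec2-large-en-de"):
#
#   from transformers import Speech2Text2Tokenizer
#
#   tokenizer = Speech2Text2Tokenizer.from_pretrained("facebook/s2t-wav2vec2-large-en-de")
#   ids = tokenizer("hallo welt").input_ids
#   print(tokenizer.decode(ids))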
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
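

# Usage sketch (hypothetical standalone use; in `transformers` this class is
# exposed as the ConvNeXT image processor):
#
#   import numpy as np
#   from PIL import Image
#
#   processor = ConvNextImageProcessor()
#   image = Image.fromarray(np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8))
#   batch = processor(images=image, return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 3, 384, 384)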
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, temp1 = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp1

            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp)

            self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if value of any node in min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
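
# Worked example (assuming the definitions above), for a triangle graph with
# weighted edges 0-1 (1), 1-2 (2) and 0-2 (3):
#
#   adjacency_list = {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 2]], 2: [[0, 3], [1, 2]]}
#   prisms_algorithm(adjacency_list)  # -> [(0, 1), (1, 2)]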
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random numpy arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str:
if "emb" in name:
lowerCamelCase__ : Dict = name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
lowerCamelCase__ : List[str] = name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
lowerCamelCase__ : List[str] = name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
lowerCamelCase__ : Optional[int] = name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
lowerCamelCase__ : Union[str, Any] = name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
lowerCamelCase__ : Dict = name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
lowerCamelCase__ : Optional[Any] = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
lowerCamelCase__ : Dict = name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
lowerCamelCase__ : Optional[Any] = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
lowerCamelCase__ : Optional[Any] = name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
lowerCamelCase__ : int = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
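# A quick illustration of the renaming rules above on one representative key;
# the key string is a plausible example, not copied from a real checkpoint.
example_key = "transformer.layers.0.cross_attention.in_proj_weight"
renamed_key = example_key.replace("transformer", "model.decoder").replace("cross_attention", "encoder_attn")
assert renamed_key == "model.decoder.layers.0.encoder_attn.in_proj_weight"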
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Tuple[Dict, Dict]:
lowerCamelCase__ : int = list(state_dict.keys() )
lowerCamelCase__ : Tuple = {}
for key in keys:
lowerCamelCase__ : Any = state_dict.pop(UpperCamelCase )
lowerCamelCase__ : Union[str, Any] = rename_keys(UpperCamelCase )
if "in_proj_weight" in key:
# split fused qkv proj
lowerCamelCase__ : Union[str, Any] = val[:hidden_size, :]
lowerCamelCase__ : Any = val[hidden_size : 2 * hidden_size, :]
lowerCamelCase__ : Optional[int] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
lowerCamelCase__ : str = val
else:
lowerCamelCase__ : Union[str, Any] = val
return state_dict, enc_dec_proj_state_dict
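# A minimal standalone illustration of the fused q/k/v split performed above,
# assuming a toy hidden size of 4: the fused projection stacks the query, key
# and value weights along dim 0, so three equal row-blocks recover them.
import torch

toy_hidden_size = 4
fused = torch.arange(3 * toy_hidden_size * toy_hidden_size, dtype=torch.float32).reshape(3 * toy_hidden_size, toy_hidden_size)
q_proj = fused[:toy_hidden_size, :]                        # first block of rows
k_proj = fused[toy_hidden_size : 2 * toy_hidden_size, :]   # middle block
v_proj = fused[-toy_hidden_size:, :]                       # last block
assert torch.equal(torch.cat([q_proj, k_proj, v_proj], dim=0), fused)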
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
lowerCamelCase__ : int = 1024
lowerCamelCase__ : int = 24
lowerCamelCase__ : List[Any] = 16
elif checkpoint == "medium":
lowerCamelCase__ : Any = 1536
lowerCamelCase__ : Union[str, Any] = 48
lowerCamelCase__ : Optional[int] = 24
elif checkpoint == "large":
lowerCamelCase__ : Optional[Any] = 2048
lowerCamelCase__ : Dict = 48
lowerCamelCase__ : List[Any] = 32
else:
raise ValueError(f'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
lowerCamelCase__ : Any = MusicgenDecoderConfig(
hidden_size=UpperCamelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=UpperCamelCase , num_attention_heads=UpperCamelCase , )
return config
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase="cpu" ) -> Optional[Any]:
lowerCamelCase__ : Optional[int] = MusicGen.get_pretrained(UpperCamelCase , device=UpperCamelCase )
lowerCamelCase__ : List[Any] = decoder_config_from_checkpoint(UpperCamelCase )
lowerCamelCase__ : Any = fairseq_model.lm.state_dict()
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = rename_state_dict(
UpperCamelCase , hidden_size=decoder_config.hidden_size )
lowerCamelCase__ : str = TaEncoderModel.from_pretrained("""t5-base""" )
lowerCamelCase__ : Tuple = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
lowerCamelCase__ : Optional[int] = MusicgenForCausalLM(UpperCamelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
lowerCamelCase__ , lowerCamelCase__ : List[str] = decoder.load_state_dict(UpperCamelCase , strict=UpperCamelCase )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(UpperCamelCase )
if len(UpperCamelCase ) > 0:
raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' )
if len(UpperCamelCase ) > 0:
raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
lowerCamelCase__ : Optional[Any] = MusicgenForConditionalGeneration(text_encoder=UpperCamelCase , audio_encoder=UpperCamelCase , decoder=UpperCamelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(UpperCamelCase )
# check we can do a forward pass
lowerCamelCase__ : Dict = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
lowerCamelCase__ : Optional[Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
lowerCamelCase__ : Union[str, Any] = model(input_ids=UpperCamelCase , decoder_input_ids=UpperCamelCase ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
lowerCamelCase__ : str = AutoTokenizer.from_pretrained("""t5-base""" )
lowerCamelCase__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
lowerCamelCase__ : Optional[int] = MusicgenProcessor(feature_extractor=UpperCamelCase , tokenizer=UpperCamelCase )
# set the appropriate bos/pad token ids
lowerCamelCase__ : Union[str, Any] = 2048
lowerCamelCase__ : List[str] = 2048
# set other default generation config params
lowerCamelCase__ : Optional[Any] = int(30 * audio_encoder.config.frame_rate )
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : List[Any] = 3.0
if pytorch_dump_folder is not None:
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(UpperCamelCase )
processor.save_pretrained(UpperCamelCase )
if repo_id:
logger.info(f'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(UpperCamelCase )
processor.push_to_hub(UpperCamelCase )
if __name__ == "__main__":
_A : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
_A : List[str] =parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
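# A follow-up sketch: reloading the converted artifacts is a quick sanity
# check. The local folder name below is a placeholder, not a value from this
# script's arguments.
from transformers import MusicgenForConditionalGeneration, MusicgenProcessor

reloaded_model = MusicgenForConditionalGeneration.from_pretrained("./musicgen-small")  # hypothetical dump folder
reloaded_processor = MusicgenProcessor.from_pretrained("./musicgen-small")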
| 41
| 1
|
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: int , lowerCAmelCase: int ) -> int:
while b:
_UpperCAmelCase , _UpperCAmelCase : Dict = b, a % b
return a
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: int , lowerCAmelCase: int ) -> int:
return a if b == 0 else euclidean_gcd_recursive(lowerCAmelCase , a % b )
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
print(F'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(F'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(F'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(F'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(F'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(F'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(F'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(F'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(F'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(F'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main()
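# A reference sketch of the same Euclidean algorithm with explicit variable
# names; the loop repeatedly replaces (a, b) with (b, a mod b), and the
# identity gcd(a, 0) == a terminates it.
def gcd_iterative(a: int, b: int) -> int:
    while b:
        a, b = b, a % b  # shrink the pair until the remainder hits 0
    return a

assert gcd_iterative(3, 6) == 3 and gcd_iterative(6, 3) == 3 and gcd_iterative(1, 3) == 1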
| 189
|
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a ( UpperCAmelCase ):
_lowercase = (PNDMScheduler,)
_lowercase = (("num_inference_steps", 5_0),)
def _UpperCAmelCase ( self , **A_ ):
'''simple docstring'''
_UpperCAmelCase : Tuple = {
"num_train_timesteps": 1000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**A_ )
return config
def _UpperCAmelCase ( self , A_=0 , **A_ ):
'''simple docstring'''
_UpperCAmelCase : int = dict(self.forward_default_kwargs )
_UpperCAmelCase : Optional[Any] = kwargs.pop("num_inference_steps" , A_ )
_UpperCAmelCase : int = self.dummy_sample
_UpperCAmelCase : Dict = 0.1 * sample
_UpperCAmelCase : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase : Dict = self.get_scheduler_config(**A_ )
_UpperCAmelCase : Any = scheduler_class(**A_ )
scheduler.set_timesteps(A_ )
# copy over dummy past residuals
_UpperCAmelCase : Union[str, Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A_ )
_UpperCAmelCase : str = scheduler_class.from_pretrained(A_ )
new_scheduler.set_timesteps(A_ )
# copy over dummy past residuals
_UpperCAmelCase : Union[str, Any] = dummy_past_residuals[:]
_UpperCAmelCase : Tuple = scheduler.step_prk(A_ , A_ , A_ , **A_ ).prev_sample
_UpperCAmelCase : Any = new_scheduler.step_prk(A_ , A_ , A_ , **A_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
_UpperCAmelCase : Union[str, Any] = scheduler.step_plms(A_ , A_ , A_ , **A_ ).prev_sample
_UpperCAmelCase : Optional[Any] = new_scheduler.step_plms(A_ , A_ , A_ , **A_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _UpperCAmelCase ( self ):
'''simple docstring'''
pass
def _UpperCAmelCase ( self , A_=0 , **A_ ):
'''simple docstring'''
_UpperCAmelCase : Any = dict(self.forward_default_kwargs )
_UpperCAmelCase : int = kwargs.pop("num_inference_steps" , A_ )
_UpperCAmelCase : int = self.dummy_sample
_UpperCAmelCase : Any = 0.1 * sample
_UpperCAmelCase : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase : Optional[Any] = self.get_scheduler_config()
_UpperCAmelCase : Dict = scheduler_class(**A_ )
scheduler.set_timesteps(A_ )
# copy over dummy past residuals (must be after setting timesteps)
_UpperCAmelCase : Any = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A_ )
_UpperCAmelCase : str = scheduler_class.from_pretrained(A_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(A_ )
# copy over dummy past residual (must be after setting timesteps)
_UpperCAmelCase : List[Any] = dummy_past_residuals[:]
_UpperCAmelCase : Tuple = scheduler.step_prk(A_ , A_ , A_ , **A_ ).prev_sample
_UpperCAmelCase : Union[str, Any] = new_scheduler.step_prk(A_ , A_ , A_ , **A_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
_UpperCAmelCase : Optional[Any] = scheduler.step_plms(A_ , A_ , A_ , **A_ ).prev_sample
_UpperCAmelCase : List[str] = new_scheduler.step_plms(A_ , A_ , A_ , **A_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _UpperCAmelCase ( self , **A_ ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.scheduler_classes[0]
_UpperCAmelCase : Union[str, Any] = self.get_scheduler_config(**A_ )
_UpperCAmelCase : List[str] = scheduler_class(**A_ )
_UpperCAmelCase : List[str] = 10
_UpperCAmelCase : Optional[int] = self.dummy_model()
_UpperCAmelCase : List[Any] = self.dummy_sample_deter
scheduler.set_timesteps(A_ )
for i, t in enumerate(scheduler.prk_timesteps ):
_UpperCAmelCase : Dict = model(A_ , A_ )
_UpperCAmelCase : int = scheduler.step_prk(A_ , A_ , A_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
_UpperCAmelCase : Any = model(A_ , A_ )
_UpperCAmelCase : Any = scheduler.step_plms(A_ , A_ , A_ ).prev_sample
return sample
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = dict(self.forward_default_kwargs )
_UpperCAmelCase : str = kwargs.pop("num_inference_steps" , A_ )
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase : int = self.get_scheduler_config()
_UpperCAmelCase : Optional[Any] = scheduler_class(**A_ )
_UpperCAmelCase : int = self.dummy_sample
_UpperCAmelCase : str = 0.1 * sample
if num_inference_steps is not None and hasattr(A_ , "set_timesteps" ):
scheduler.set_timesteps(A_ )
elif num_inference_steps is not None and not hasattr(A_ , "set_timesteps" ):
_UpperCAmelCase : Union[str, Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_UpperCAmelCase : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_UpperCAmelCase : Any = dummy_past_residuals[:]
_UpperCAmelCase : Any = scheduler.step_prk(A_ , 0 , A_ , **A_ ).prev_sample
_UpperCAmelCase : Optional[Any] = scheduler.step_prk(A_ , 1 , A_ , **A_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
_UpperCAmelCase : Optional[int] = scheduler.step_plms(A_ , 0 , A_ , **A_ ).prev_sample
_UpperCAmelCase : Optional[Any] = scheduler.step_plms(A_ , 1 , A_ , **A_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=A_ )
_UpperCAmelCase : List[str] = self.scheduler_classes[0]
_UpperCAmelCase : Optional[int] = self.get_scheduler_config(steps_offset=1 )
_UpperCAmelCase : str = scheduler_class(**A_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ):
self.check_over_configs(beta_start=A_ , beta_end=A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : int = 27
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase : str = self.dummy_sample
_UpperCAmelCase : str = 0.1 * sample
_UpperCAmelCase : str = self.get_scheduler_config()
_UpperCAmelCase : Tuple = scheduler_class(**A_ )
scheduler.set_timesteps(A_ )
        # before the power-of-3 fix this would error on the first step, so running two steps is enough
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
_UpperCAmelCase : Dict = scheduler.step_prk(A_ , A_ , A_ ).prev_sample
def _UpperCAmelCase ( self ):
'''simple docstring'''
with self.assertRaises(A_ ):
_UpperCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_UpperCAmelCase : Dict = self.get_scheduler_config()
_UpperCAmelCase : Union[str, Any] = scheduler_class(**A_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Dict = self.full_loop()
_UpperCAmelCase : int = torch.sum(torch.abs(A_ ) )
_UpperCAmelCase : Optional[Any] = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 1_98.13_18 ) < 1e-2
assert abs(result_mean.item() - 0.25_80 ) < 1e-3
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.full_loop(prediction_type="v_prediction" )
_UpperCAmelCase : Union[str, Any] = torch.sum(torch.abs(A_ ) )
_UpperCAmelCase : List[Any] = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 67.39_86 ) < 1e-2
assert abs(result_mean.item() - 0.08_78 ) < 1e-3
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Tuple = self.full_loop(set_alpha_to_one=A_ , beta_start=0.01 )
_UpperCAmelCase : Dict = torch.sum(torch.abs(A_ ) )
_UpperCAmelCase : Optional[int] = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 2_30.03_99 ) < 1e-2
assert abs(result_mean.item() - 0.29_95 ) < 1e-3
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.full_loop(set_alpha_to_one=A_ , beta_start=0.01 )
_UpperCAmelCase : Optional[Any] = torch.sum(torch.abs(A_ ) )
_UpperCAmelCase : Dict = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 1_86.94_82 ) < 1e-2
assert abs(result_mean.item() - 0.24_34 ) < 1e-3
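# A minimal sketch of the two-phase prk/plms stepping pattern the tests above
# exercise, with a scaled copy of the sample standing in for a real UNet output.
import torch
from diffusers import PNDMScheduler

sketch_scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear")
sketch_scheduler.set_timesteps(10)
sketch_sample = torch.randn(1, 3, 8, 8)
for t in sketch_scheduler.prk_timesteps:      # Runge-Kutta warm-up steps
    sketch_sample = sketch_scheduler.step_prk(0.1 * sketch_sample, t, sketch_sample).prev_sample
for t in sketch_scheduler.plms_timesteps:     # linear multistep steps
    sketch_sample = sketch_scheduler.step_plms(0.1 * sketch_sample, t, sketch_sample).prev_sample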
| 189
| 1
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__A = logging.get_logger(__name__)
class snake_case ( __snake_case ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["""pixel_values"""]
def __init__( self : List[str] , UpperCamelCase__ : bool = True , UpperCamelCase__ : int = 3_2 , UpperCamelCase__ : Union[str, Any]=PILImageResampling.BILINEAR , UpperCamelCase__ : bool = True , **UpperCamelCase__ : Tuple , )-> None:
'''simple docstring'''
__lowerCAmelCase: Dict = do_resize
__lowerCAmelCase: Optional[int] = do_rescale
__lowerCAmelCase: int = size_divisor
__lowerCAmelCase: List[Any] = resample
super().__init__(**UpperCamelCase__)
def lowercase_ ( self : List[str] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[ChannelDimension] = None , **UpperCamelCase__ : Optional[Any])-> np.ndarray:
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase: Optional[Any] = get_image_size(UpperCamelCase__)
# Rounds the height and width down to the closest multiple of size_divisor
__lowerCAmelCase: Optional[Any] = height // size_divisor * size_divisor
__lowerCAmelCase: List[str] = width // size_divisor * size_divisor
__lowerCAmelCase: Tuple = resize(UpperCamelCase__ , (new_h, new_w) , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__)
return image
def lowercase_ ( self : List[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : float , UpperCamelCase__ : Optional[ChannelDimension] = None , **UpperCamelCase__ : str)-> np.ndarray:
'''simple docstring'''
return rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__)
def lowercase_ ( self : Dict , UpperCamelCase__ : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Union[TensorType, str]] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Any , )-> BatchFeature:
'''simple docstring'''
__lowerCAmelCase: str = do_resize if do_resize is not None else self.do_resize
__lowerCAmelCase: Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
__lowerCAmelCase: str = size_divisor if size_divisor is not None else self.size_divisor
__lowerCAmelCase: Optional[Any] = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError("size_divisor is required for resizing")
__lowerCAmelCase: List[str] = make_list_of_images(UpperCamelCase__)
if not valid_images(UpperCamelCase__):
raise ValueError("Invalid image(s)")
# All transformations expect numpy arrays.
__lowerCAmelCase: Dict = [to_numpy_array(UpperCamelCase__) for img in images]
if do_resize:
__lowerCAmelCase: Tuple = [self.resize(UpperCamelCase__ , size_divisor=UpperCamelCase__ , resample=UpperCamelCase__) for image in images]
if do_rescale:
__lowerCAmelCase: Dict = [self.rescale(UpperCamelCase__ , scale=1 / 2_5_5) for image in images]
__lowerCAmelCase: List[Any] = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__) for image in images]
__lowerCAmelCase: List[Any] = {"pixel_values": images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__)
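# A small numeric illustration of the size_divisor rounding used in resize()
# above: each spatial dimension is floored to the nearest lower multiple.
def round_down(dim: int, divisor: int) -> int:
    return dim // divisor * divisor

assert round_down(517, 32) == 512   # 517 -> 512
assert round_down(480, 32) == 480   # already a multiple, unchanged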
| 217
|
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case ( __snake_case, unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : List[str] = None
SCREAMING_SNAKE_CASE_ : Tuple = BloomTokenizerFast
SCREAMING_SNAKE_CASE_ : str = BloomTokenizerFast
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Tuple = False
SCREAMING_SNAKE_CASE_ : int = """tokenizer_file"""
SCREAMING_SNAKE_CASE_ : List[str] = {"""bos_token""": """<s>""", """eos_token""": """</s>""", """unk_token""": """<unk>""", """pad_token""": """<pad>"""}
def lowercase_ ( self : List[Any])-> Dict:
'''simple docstring'''
super().setUp()
__lowerCAmelCase: Optional[Any] = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
tokenizer.save_pretrained(self.tmpdirname)
def lowercase_ ( self : List[Any] , **UpperCamelCase__ : Union[str, Any])-> Optional[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__)
def lowercase_ ( self : Union[str, Any])-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: str = self.get_rust_tokenizer()
__lowerCAmelCase: int = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
__lowerCAmelCase: List[str] = [[2_1_7_5, 2_3_7_1_4, 7_3_1_7_3, 1_4_4_2_5_2, 2], [7_7, 1_3_2_6_1_9, 3_4_7_8, 3_6_8, 1_0_9_5_8_6, 3_5_4_3_3, 2]]
__lowerCAmelCase: List[str] = tokenizer.batch_encode_plus(UpperCamelCase__)["input_ids"]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__)
__lowerCAmelCase: List[Any] = tokenizer.batch_decode(UpperCamelCase__)
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__)
def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : Tuple=6)-> Tuple:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
__lowerCAmelCase: Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__)
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
__lowerCAmelCase: Dict = "This is a simple input"
__lowerCAmelCase: str = ["This is a simple input 1", "This is a simple input 2"]
__lowerCAmelCase: int = ("This is a simple input", "This is a pair")
__lowerCAmelCase: Union[str, Any] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
try:
tokenizer_r.encode(UpperCamelCase__ , max_length=UpperCamelCase__)
tokenizer_r.encode_plus(UpperCamelCase__ , max_length=UpperCamelCase__)
tokenizer_r.batch_encode_plus(UpperCamelCase__ , max_length=UpperCamelCase__)
tokenizer_r.encode(UpperCamelCase__ , max_length=UpperCamelCase__)
tokenizer_r.batch_encode_plus(UpperCamelCase__ , max_length=UpperCamelCase__)
except ValueError:
self.fail("Bloom Tokenizer should be able to deal with padding")
__lowerCAmelCase: Tuple = None # Hotfixing padding = None
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length")
# Simple input
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length")
# Simple input
self.assertRaises(
UpperCamelCase__ , tokenizer_r.batch_encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length" , )
# Pair input
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length")
# Pair input
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length")
# Pair input
self.assertRaises(
UpperCamelCase__ , tokenizer_r.batch_encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length" , )
def lowercase_ ( self : Optional[Any])-> List[str]:
'''simple docstring'''
__lowerCAmelCase: Dict = self.get_rust_tokenizer()
__lowerCAmelCase: List[str] = load_dataset("xnli" , "all_languages" , split="test" , streaming=UpperCamelCase__)
__lowerCAmelCase: Union[str, Any] = next(iter(UpperCamelCase__))["premise"] # pick up one data
__lowerCAmelCase: Any = list(sample_data.values())
__lowerCAmelCase: int = list(map(tokenizer.encode , UpperCamelCase__))
__lowerCAmelCase: str = [tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__) for x in output_tokens]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__)
def lowercase_ ( self : Optional[int])-> str:
'''simple docstring'''
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map) , 1)
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]) , 1)
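# A minimal round-trip sketch mirroring the encode/decode checks above,
# assuming network access to the "bigscience/tokenizer" checkpoint.
from transformers import BloomTokenizerFast

bloom_tok = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
sample_ids = bloom_tok("The quick brown fox")["input_ids"]
print(bloom_tok.decode(sample_ids))  # expected to round-trip to the original string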
| 217
| 1
|
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _lowercase ( __a ):
"""simple docstring"""
def UpperCAmelCase_ ( self : Dict , UpperCamelCase__ : str ) -> Union[str, Any]:
'''simple docstring'''
with open(UpperCamelCase__ , encoding='''utf-8''' ) as input_file:
__UpperCamelCase =re.compile(R'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''' )
__UpperCamelCase =input_file.read()
__UpperCamelCase =regexp.search(UpperCamelCase__ )
return match
def UpperCAmelCase_ ( self : Optional[int] , UpperCamelCase__ : str ) -> int:
'''simple docstring'''
with open(UpperCamelCase__ , encoding='''utf-8''' ) as input_file:
__UpperCamelCase =re.compile(R'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''' , re.DOTALL )
__UpperCamelCase =input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
__UpperCamelCase =regexp.finditer(UpperCamelCase__ )
__UpperCamelCase =[match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase =Path('''./datasets''' )
__UpperCamelCase =list(dataset_paths.absolute().glob('''**/*.py''' ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(UpperCamelCase__ ) ):
raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" )
def UpperCAmelCase_ ( self : Tuple ) -> Any:
'''simple docstring'''
__UpperCamelCase =Path('''./datasets''' )
__UpperCamelCase =list(dataset_paths.absolute().glob('''**/*.py''' ) )
for dataset in dataset_files:
if self._no_print_statements(str(UpperCamelCase__ ) ):
raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
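# A quick demonstration of the no-encoding regex from the first check above,
# run on two illustrative lines rather than real dataset scripts.
import re

no_encoding_re = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
assert no_encoding_re.search(' open("data.txt")') is not None                  # flagged: no encoding kwarg
assert no_encoding_re.search(' open("data.txt", encoding="utf-8")') is None    # passes the check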
| 85
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
__lowercase = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class _lowercase ( __a ):
"""simple docstring"""
lowercase__ = '''albert'''
def __init__( self : List[Any] , UpperCamelCase__ : List[Any]=30000 , UpperCamelCase__ : int=128 , UpperCamelCase__ : str=4096 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : Dict=1 , UpperCamelCase__ : Union[str, Any]=64 , UpperCamelCase__ : Any=16384 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : Optional[int]="gelu_new" , UpperCamelCase__ : int=0 , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : Dict=512 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : str=0.02 , UpperCamelCase__ : Tuple=1E-12 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Dict="absolute" , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : int=2 , UpperCamelCase__ : Optional[Any]=3 , **UpperCamelCase__ : List[str] , ) -> Dict:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
__UpperCamelCase =vocab_size
__UpperCamelCase =embedding_size
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_hidden_groups
__UpperCamelCase =num_attention_heads
__UpperCamelCase =inner_group_num
__UpperCamelCase =hidden_act
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =type_vocab_size
__UpperCamelCase =initializer_range
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =classifier_dropout_prob
__UpperCamelCase =position_embedding_type
class _lowercase ( __a ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
__UpperCamelCase ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__UpperCamelCase ={0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 85
| 1
|
import qiskit
def a ( _UpperCAmelCase : int , _UpperCAmelCase : int ):
'''simple docstring'''
__UpperCAmelCase : List[str] = qiskit.Aer.get_backend('''aer_simulator''' )
__UpperCAmelCase : Optional[Any] = qiskit.QuantumCircuit(4 , 2 )
# encode inputs in qubits 0 and 1
if bita == 1:
qc_ha.x(0 )
if bita == 1:
qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
__UpperCAmelCase : str = qiskit.execute(_UpperCAmelCase , _UpperCAmelCase , shots=10_00 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(_UpperCAmelCase )
if __name__ == "__main__":
__A =half_adder(1, 1)
print(f'''Half Adder Output Qubit Counts: {counts}''')
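# A classical truth-table sketch for the circuit above. Each input pair should
# yield one dominant counts key of the form "<carry><sum>": classical bit 1
# holds the AND/carry qubit and classical bit 0 the XOR/sum qubit, and qiskit
# prints classical bit 0 rightmost.
for bit_a in (0, 1):
    for bit_b in (0, 1):
        expected_key = f"{bit_a & bit_b}{bit_a ^ bit_b}"
        print(f"inputs {bit_a},{bit_b} -> expected counts key {expected_key!r}")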
| 226
|
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class UpperCAmelCase__ :
'''simple docstring'''
UpperCamelCase = None
def snake_case__ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
__UpperCAmelCase : Optional[int] = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , a_ )
def snake_case__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Union[str, Any] = os.path.join(a_ , '''feat_extract.json''' )
feat_extract_first.to_json_file(a_ )
__UpperCAmelCase : Any = self.feature_extraction_class.from_json_file(a_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def snake_case__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : List[str] = feat_extract_first.save_pretrained(a_ )[0]
check_json_file_has_correct_format(a_ )
__UpperCAmelCase : Optional[Any] = self.feature_extraction_class.from_pretrained(a_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def snake_case__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : int = self.feature_extraction_class()
self.assertIsNotNone(a_ )
| 226
| 1
|
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class A__ ( __SCREAMING_SNAKE_CASE ):
def snake_case_ ( self ) -> int:
'''simple docstring'''
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
return Dataset.from_dict(_SCREAMING_SNAKE_CASE )
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = self._create_example_records()
A_ = Dataset.from_list(_SCREAMING_SNAKE_CASE )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(_SCREAMING_SNAKE_CASE ):
self.assertDictEqual(_SCREAMING_SNAKE_CASE , example_records[i] )
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = self._create_example_records()
A_ = Dataset.from_list(_SCREAMING_SNAKE_CASE )
A_ = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def snake_case_ ( self ) -> Dict: # checks what happens with missing columns
'''simple docstring'''
A_ = [{"col_1": 1}, {"col_2": "x"}]
A_ = Dataset.from_list(_SCREAMING_SNAKE_CASE )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def snake_case_ ( self ) -> Optional[int]: # checks if the type can be inferred from the second record
'''simple docstring'''
A_ = [{"col_1": []}, {"col_1": [1, 2]}]
A_ = Dataset.from_list(_SCREAMING_SNAKE_CASE )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = Dataset.from_list([] )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 0 )
self.assertListEqual(dset.column_names , [] )
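# A compact illustration of the missing-column behaviour tested above: the
# first record fixes the schema, and later records are padded with None.
from datasets import Dataset

sketch_dset = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
print(sketch_dset[0])  # {'col_1': 1}
print(sketch_dset[1])  # {'col_1': None} -- 'col_2' is dropped, schema came from record 0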
| 364
|
'''simple docstring'''
import sys
__lowerCamelCase = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def UpperCAmelCase__ ( UpperCAmelCase__ = N ) -> int:
A_ = -sys.maxsize - 1
for i in range(len(UpperCAmelCase__ ) - 12 ):
A_ = 1
for j in range(13 ):
product *= int(n[i + j] )
if product > largest_product:
A_ = product
return largest_product
if __name__ == "__main__":
print(f"""{solution() = }""")
| 101
| 0
|
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def UpperCamelCase ( UpperCAmelCase ) ->Tuple: # picklable for multiprocessing
"""simple docstring"""
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def UpperCamelCase ( ) ->List[str]:
"""simple docstring"""
with parallel_backend("spark" ):
assert ParallelBackendConfig.backend_name == "spark"
a_ = [1, 2, 3]
with pytest.raises(UpperCAmelCase ):
with parallel_backend("unsupported backend" ):
map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=2 )
with pytest.raises(UpperCAmelCase ):
with parallel_backend("unsupported backend" ):
map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def UpperCamelCase ( UpperCAmelCase ) ->Dict:
"""simple docstring"""
a_ = [1, 2]
a_ = {"a": 1, "b": 2}
a_ = {"a": [1, 2], "b": [3, 4]}
a_ = {"a": {"1": 1}, "b": 2}
a_ = {"a": 1, "b": 2, "c": 3, "d": 4}
a_ = [2, 3]
a_ = {"a": 2, "b": 3}
a_ = {"a": [2, 3], "b": [4, 5]}
a_ = {"a": {"1": 2}, "b": 3}
a_ = {"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark" ):
assert map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) == expected_map_nested_sa
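# A single-process sketch of the map_nested structures exercised above
# (no spark backend, just the default sequential path).
from datasets.utils.py_utils import map_nested

assert map_nested(lambda i: i + 1, {"a": [1, 2], "b": [3, 4]}) == {"a": [2, 3], "b": [4, 5]}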
| 243
|
"""simple docstring"""
def UpperCamelCase ( UpperCAmelCase ) ->int:
"""simple docstring"""
a_ = [[0 for _ in range(UpperCAmelCase )] for _ in range(m + 1 )]
for i in range(m + 1 ):
a_ = 1
for n in range(m + 1 ):
for k in range(1 , UpperCAmelCase ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
UpperCamelCase_ = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
UpperCamelCase_ = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
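# A top-down sketch of standard integer-partition counting, closely related to
# the bottom-up memo table above: p(n, k) counts partitions of n into parts no
# larger than k, via p(n, k) = p(n, k - 1) + p(n - k, k).
from functools import lru_cache

@lru_cache(maxsize=None)
def partitions(n: int, k: int) -> int:
    if n == 0:
        return 1
    if n < 0 or k == 0:
        return 0
    return partitions(n, k - 1) + partitions(n - k, k)

assert partitions(5, 5) == 7  # 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1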
| 243
| 1
|
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
A : List[str] = logging.get_logger(__name__)
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
try:
with open(_UpperCamelCase , "rb" ) as flax_state_f:
__lowerCAmelCase = from_bytes(_UpperCamelCase , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(_UpperCamelCase ) as f:
if f.read().startswith("version" ):
raise OSError(
"You seem to have cloned a repository without having git-lfs installed. Please"
" install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
" folder you cloned." )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. " )
return load_flax_weights_in_pytorch_model(_UpperCamelCase , _UpperCamelCase )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
__lowerCAmelCase = flatten_dict(jax.tree_util.tree_map(lambda _UpperCamelCase : x.dtype == jnp.bfloataa , _UpperCamelCase ) ).values()
if any(_UpperCamelCase ):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
__lowerCAmelCase = jax.tree_util.tree_map(
lambda _UpperCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _UpperCamelCase )
__lowerCAmelCase = ""
__lowerCAmelCase = flatten_dict(_UpperCamelCase , sep="." )
__lowerCAmelCase = pt_model.state_dict()
# keep track of unexpected & missing keys
__lowerCAmelCase = []
__lowerCAmelCase = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
__lowerCAmelCase = flax_key_tuple.split("." )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
__lowerCAmelCase = flax_key_tuple_array[:-1] + ["weight"]
__lowerCAmelCase = jnp.transpose(_UpperCamelCase , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
__lowerCAmelCase = flax_key_tuple_array[:-1] + ["weight"]
__lowerCAmelCase = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
__lowerCAmelCase = flax_key_tuple_array[:-1] + ["weight"]
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(_UpperCamelCase ):
__lowerCAmelCase = (
flax_key_tuple_string.replace("_0" , ".0" )
.replace("_1" , ".1" )
.replace("_2" , ".2" )
.replace("_3" , ".3" )
.replace("_4" , ".4" )
.replace("_5" , ".5" )
.replace("_6" , ".6" )
.replace("_7" , ".7" )
.replace("_8" , ".8" )
.replace("_9" , ".9" )
)
__lowerCAmelCase = ".".join(_UpperCamelCase )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
else:
# add weight to pytorch dict
__lowerCAmelCase = np.asarray(_UpperCamelCase ) if not isinstance(_UpperCamelCase , np.ndarray ) else flax_tensor
__lowerCAmelCase = torch.from_numpy(_UpperCamelCase )
# remove from missing keys
missing_keys.remove(_UpperCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_UpperCamelCase )
pt_model.load_state_dict(_UpperCamelCase )
# re-transform missing_keys to list
__lowerCAmelCase = list(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
if len(_UpperCamelCase ) > 0:
logger.warning(
f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
" use it for predictions and inference." )
return pt_model
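# A minimal shape-only sketch of the two kernel-layout fixes applied above:
# Flax stores dense kernels as (in, out) and 2D conv kernels as (h, w, in, out),
# while PyTorch expects (out, in) and (out, in, h, w) respectively.
import numpy as np

dense_kernel = np.zeros((8, 16))                    # flax (in_features, out_features)
pt_dense = dense_kernel.T                           # -> (16, 8)
conv_kernel = np.zeros((3, 3, 8, 16))               # flax (h, w, in_ch, out_ch)
pt_conv = np.transpose(conv_kernel, (3, 2, 0, 1))   # -> (16, 8, 3, 3)
assert pt_dense.shape == (16, 8) and pt_conv.shape == (16, 8, 3, 3)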
| 366
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A : str = {
"configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Any = [
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
A : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 259
| 0
|
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase__ ( unittest.TestCase):
def __A (self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def __A (self ) -> Optional[Any]:
_lowercase =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
_lowercase =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
_lowercase ='''xvjiarui/stable-diffusion-2-inpainting'''
_lowercase , _lowercase =FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCAmelCase , safety_checker=UpperCAmelCase )
_lowercase ='''Face of a yellow cat, high resolution, sitting on a park bench'''
_lowercase =jax.random.PRNGKey(0 )
_lowercase =5_0
_lowercase =jax.device_count()
_lowercase =num_samples * [prompt]
_lowercase =num_samples * [init_image]
_lowercase =num_samples * [mask_image]
_lowercase , _lowercase , _lowercase =pipeline.prepare_inputs(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# shard inputs and rng
_lowercase =replicate(UpperCAmelCase )
_lowercase =jax.random.split(UpperCAmelCase , jax.device_count() )
_lowercase =shard(UpperCAmelCase )
_lowercase =shard(UpperCAmelCase )
_lowercase =shard(UpperCAmelCase )
_lowercase =pipeline(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase )
_lowercase =output.images.reshape(UpperCAmelCase , 5_1_2 , 5_1_2 , 3 )
_lowercase =images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
_lowercase =jnp.asarray(jax.device_get(image_slice.flatten() ) )
_lowercase =jnp.array(
[0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084] )
print(f"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
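# A tiny sketch of the replicate/shard pattern used above: shard() splits the
# leading batch axis across local devices, replicate() copies a pytree to all
# of them. Runs on however many devices jax reports.
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

n_devices = jax.device_count()
batch = jnp.zeros((n_devices * 2, 4))
sharded = shard(batch)                      # -> shape (n_devices, 2, 4)
params = replicate({"w": jnp.ones(3)})      # every leaf gains a leading device axis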
| 5
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_50, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
},
{
'framework': 'tensorflow',
'script': 'run_tf.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_00, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
},
] )
class A ( unittest.TestCase ):
def lowercase_ (self : int ) -> Optional[Any]:
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=__UpperCAmelCase , )
assert hasattr(self , "env" )
def lowercase_ (self : List[Any] , __UpperCAmelCase : Optional[int]=1 ) -> Dict:
"""simple docstring"""
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=__UpperCAmelCase , instance_type=self.instance_type , debugger_hook_config=__UpperCAmelCase , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )
def lowercase_ (self : Optional[Any] , __UpperCAmelCase : Tuple ) -> Optional[int]:
"""simple docstring"""
TrainingJobAnalytics(__UpperCAmelCase ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
def lowercase_ (self : Any ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.create_estimator()
# run training
estimator.fit()
# result dataframe
UpperCAmelCase__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
UpperCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCAmelCase__ = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __UpperCAmelCase )
| 65
| 0
|
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class __a( _a ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE=0.01 ,_SCREAMING_SNAKE_CASE=1_000 ) -> int:
UpperCAmelCase_ : Union[str, Any] = p_stop
UpperCAmelCase_ : Optional[Any] = max_length
def __iter__( self ) -> Dict:
UpperCAmelCase_ : Tuple = 0
UpperCAmelCase_ : Optional[int] = False
while not stop and count < self.max_length:
yield count
count += 1
UpperCAmelCase_ : Optional[int] = random.random() < self.p_stop
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=True ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = [
BatchSamplerShard(_SCREAMING_SNAKE_CASE ,2 ,_SCREAMING_SNAKE_CASE ,split_batches=_SCREAMING_SNAKE_CASE ,even_batches=_SCREAMING_SNAKE_CASE )
for i in range(2 )
]
UpperCAmelCase_ : Optional[Any] = [list(_SCREAMING_SNAKE_CASE ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(_SCREAMING_SNAKE_CASE ) for shard in batch_sampler_shards] ,[len(_SCREAMING_SNAKE_CASE ) for e in expected] )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Union[str, Any]:
# Check the shards when the dataset is a round multiple of total batch size.
UpperCAmelCase_ : int = BatchSampler(range(24 ) ,batch_size=3 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = BatchSampler(range(24 ) ,batch_size=3 ,drop_last=_SCREAMING_SNAKE_CASE )
# Expected shouldn't change
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
UpperCAmelCase_ : str = BatchSampler(range(21 ) ,batch_size=3 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = BatchSampler(range(21 ) ,batch_size=3 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
        # Check the shards when the dataset is not a round multiple of batch size but yields a number of
        # batches that is a multiple of num_processes.
UpperCAmelCase_ : Union[str, Any] = BatchSampler(range(22 ) ,batch_size=3 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = BatchSampler(range(22 ) ,batch_size=3 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
        # Check the shards when the dataset is not a round multiple of batch size and yields a number of
        # batches that is not a multiple of num_processes.
UpperCAmelCase_ : Optional[Any] = BatchSampler(range(20 ) ,batch_size=3 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = BatchSampler(range(20 ) ,batch_size=3 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is very small.
UpperCAmelCase_ : Tuple = BatchSampler(range(2 ) ,batch_size=3 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = BatchSampler(range(2 ) ,batch_size=3 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = [[], []]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> str:
# Check the shards when the dataset is a round multiple of batch size.
UpperCAmelCase_ : Tuple = BatchSampler(range(24 ) ,batch_size=4 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,split_batches=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = BatchSampler(range(24 ) ,batch_size=4 ,drop_last=_SCREAMING_SNAKE_CASE )
# Expected shouldn't change
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,split_batches=_SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size.
UpperCAmelCase_ : int = BatchSampler(range(22 ) ,batch_size=4 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,split_batches=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = BatchSampler(range(22 ) ,batch_size=4 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,split_batches=_SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
UpperCAmelCase_ : Any = BatchSampler(range(21 ) ,batch_size=4 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,split_batches=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = BatchSampler(range(21 ) ,batch_size=4 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,split_batches=_SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is very small.
UpperCAmelCase_ : Optional[Any] = BatchSampler(range(2 ) ,batch_size=4 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,split_batches=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = BatchSampler(range(2 ) ,batch_size=4 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = [[], []]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,split_batches=_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Optional[int]:
# Check the shards when the dataset is a round multiple of total batch size.
UpperCAmelCase_ : Union[str, Any] = BatchSampler(range(24 ) ,batch_size=3 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,even_batches=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = BatchSampler(range(24 ) ,batch_size=3 ,drop_last=_SCREAMING_SNAKE_CASE )
# Expected shouldn't change
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,even_batches=_SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
UpperCAmelCase_ : Dict = BatchSampler(range(21 ) ,batch_size=3 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,even_batches=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = BatchSampler(range(21 ) ,batch_size=3 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,even_batches=_SCREAMING_SNAKE_CASE )
 # Check the shards when the dataset is not a round multiple of batch size but has a round multiple of
 # num_processes batches.
UpperCAmelCase_ : str = BatchSampler(range(22 ) ,batch_size=3 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,even_batches=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = BatchSampler(range(22 ) ,batch_size=3 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,even_batches=_SCREAMING_SNAKE_CASE )
 # Check the shards when the dataset is not a round multiple of batch size and also not a round multiple of
 # num_processes batches.
UpperCAmelCase_ : int = BatchSampler(range(20 ) ,batch_size=3 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,even_batches=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = BatchSampler(range(20 ) ,batch_size=3 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,even_batches=_SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is very small.
UpperCAmelCase_ : Dict = BatchSampler(range(2 ) ,batch_size=3 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,even_batches=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = BatchSampler(range(2 ) ,batch_size=3 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = [[], []]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,even_batches=_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Optional[Any]:
# Check the shards when the dataset is a round multiple of batch size.
UpperCAmelCase_ : Optional[int] = BatchSampler(range(24 ) ,batch_size=4 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,split_batches=_SCREAMING_SNAKE_CASE ,even_batches=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = BatchSampler(range(24 ) ,batch_size=4 ,drop_last=_SCREAMING_SNAKE_CASE )
# Expected shouldn't change
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,split_batches=_SCREAMING_SNAKE_CASE ,even_batches=_SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size.
UpperCAmelCase_ : Optional[int] = BatchSampler(range(22 ) ,batch_size=4 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,split_batches=_SCREAMING_SNAKE_CASE ,even_batches=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = BatchSampler(range(22 ) ,batch_size=4 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,split_batches=_SCREAMING_SNAKE_CASE ,even_batches=_SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
UpperCAmelCase_ : Dict = BatchSampler(range(21 ) ,batch_size=4 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,split_batches=_SCREAMING_SNAKE_CASE ,even_batches=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = BatchSampler(range(21 ) ,batch_size=4 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,split_batches=_SCREAMING_SNAKE_CASE ,even_batches=_SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is very small.
UpperCAmelCase_ : List[Any] = BatchSampler(range(2 ) ,batch_size=4 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = [[[0, 1]], []]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,split_batches=_SCREAMING_SNAKE_CASE ,even_batches=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = BatchSampler(range(2 ) ,batch_size=4 ,drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = [[], []]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,split_batches=_SCREAMING_SNAKE_CASE ,even_batches=_SCREAMING_SNAKE_CASE )
 def test_batch_sampler_with_varying_batch_size( self ) -> List[str]:
  batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
  batch_sampler_shards = [BatchSamplerShard(batch_sampler ,2 ,i ,even_batches=False ) for i in range(2 )]
  self.assertEqual(len(batch_sampler_shards[0] ) ,3 )
  self.assertEqual(len(batch_sampler_shards[1] ) ,2 )
  self.assertListEqual(list(batch_sampler_shards[0] ) ,[[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
  self.assertListEqual(list(batch_sampler_shards[1] ) ,[[3, 4], [9, 10, 11]] )
 def check_iterable_dataset_shards( self ,dataset ,seed ,batch_size ,drop_last=False ,num_processes=2 ,split_batches=False ) -> Optional[Any]:
  random.seed(seed )
  reference = list(dataset )
  iterable_dataset_shards = [
   IterableDatasetShard(
    dataset ,batch_size=batch_size ,drop_last=drop_last ,num_processes=num_processes ,process_index=i ,split_batches=split_batches ,)
   for i in range(num_processes )
  ]
  iterable_dataset_lists = []
  for iterable_dataset_shard in iterable_dataset_shards:
   # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
   random.seed(seed )
   iterable_dataset_lists.append(list(iterable_dataset_shard ) )
  shard_batch_size = batch_size // num_processes if split_batches else batch_size
  # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
  first_list = iterable_dataset_lists[0]
  for l in iterable_dataset_lists[1:]:
   self.assertEqual(len(l ) ,len(first_list ) )
   self.assertTrue(len(first_list ) % shard_batch_size == 0 )
  observed = []
  for idx in range(0 ,len(first_list ) ,shard_batch_size ):
   for l in iterable_dataset_lists:
    observed += l[idx : idx + shard_batch_size]
  if not drop_last:
   while len(observed ) > len(reference ):
    reference += reference
  self.assertListEqual(observed ,reference[: len(observed )] )
 def test_iterable_dataset_shard( self ) -> int:
  seed = 42
  dataset = RandomIterableDataset()
  self.check_iterable_dataset_shards(dataset ,seed ,batch_size=4 ,drop_last=False ,split_batches=False )
  self.check_iterable_dataset_shards(dataset ,seed ,batch_size=4 ,drop_last=True ,split_batches=False )
  self.check_iterable_dataset_shards(dataset ,seed ,batch_size=4 ,drop_last=False ,split_batches=True )
  self.check_iterable_dataset_shards(dataset ,seed ,batch_size=4 ,drop_last=True ,split_batches=True )
  # Edge case with a very small dataset
  dataset = RandomIterableDataset(max_length=2 )
  self.check_iterable_dataset_shards(dataset ,seed ,batch_size=4 ,drop_last=False ,split_batches=False )
  self.check_iterable_dataset_shards(dataset ,seed ,batch_size=4 ,drop_last=True ,split_batches=False )
  self.check_iterable_dataset_shards(dataset ,seed ,batch_size=4 ,drop_last=False ,split_batches=True )
  self.check_iterable_dataset_shards(dataset ,seed ,batch_size=4 ,drop_last=True ,split_batches=True )
 def test_skip_batch_sampler( self ) -> Dict:
  batch_sampler = BatchSampler(range(16 ) ,batch_size=4 ,drop_last=False )
  new_batch_sampler = SkipBatchSampler(batch_sampler ,2 )
  self.assertListEqual(list(new_batch_sampler ) ,[[8, 9, 10, 11], [12, 13, 14, 15]] )
 def test_skip_data_loader( self ) -> Union[str, Any]:
  dataloader = SkipDataLoader(list(range(16 ) ) ,batch_size=4 ,skip_batches=2 )
  self.assertListEqual([t.tolist() for t in dataloader] ,[[8, 9, 10, 11], [12, 13, 14, 15]] )
 def test_skip_first_batches( self ) -> Any:
  dataloader = DataLoader(list(range(16 ) ) ,batch_size=4 )
  new_dataloader = skip_first_batches(dataloader ,num_batches=2 )
  self.assertListEqual([t.tolist() for t in new_dataloader] ,[[8, 9, 10, 11], [12, 13, 14, 15]] )
 def test_end_of_dataloader( self ) -> List[str]:
  dataloader = DataLoaderShard(list(range(16 ) ) ,batch_size=4 )
  for idx, _ in enumerate(dataloader ):
   self.assertEqual(dataloader.end_of_dataloader ,idx == 3 )
  # Test it also works on the second iteration
  for idx, _ in enumerate(dataloader ):
   self.assertEqual(dataloader.end_of_dataloader ,idx == 3 )
 def test_end_of_dataloader_dispatcher( self ) -> Dict:
  Accelerator()
  dataloader = DataLoaderDispatcher(range(16 ) ,batch_size=4 )
  for idx, _ in enumerate(dataloader ):
   self.assertEqual(dataloader.end_of_dataloader ,idx == 3 )
  # Test it also works on the second iteration
  for idx, _ in enumerate(dataloader ):
   self.assertEqual(dataloader.end_of_dataloader ,idx == 3 )
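# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library under test): the round-robin
# contract the shard tests above assert.  With even_batches=True and
# drop_last=False, enough full-size batches are drawn -- wrapping around the
# dataset when needed -- so every process receives the same number of
# batches; process i then takes batches i, i + n, i + 2n, ...
# The helper name `shard_batches` is hypothetical.
import itertools

def shard_batches(dataset, batch_size, num_processes, process_index):
    num_batches = -(-len(dataset) // batch_size)               # ceil division
    total = -(-num_batches // num_processes) * num_processes   # round up to a multiple
    stream = itertools.cycle(dataset)
    batches = [[next(stream) for _ in range(batch_size)] for _ in range(total)]
    return batches[process_index::num_processes]

# Reproduces the expectations above, e.g. for a dataset of 20 items:
# shard_batches(range(20), 3, 2, 0) == [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]]
# shard_batches(range(2), 3, 2, 1)  == [[1, 0, 1]]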
| 361
|
from random import shuffle
import tensorflow as tf
from numpy import array
def lowerCamelCase__ ( vectors , noofclusters ):
    '''simple docstring'''
    # NOTE: this uses the TF1 graph API (tf.Session / tf.placeholder); under
    # TensorFlow 2 it must be run through tf.compat.v1 with eager execution disabled.
    noofclusters = int(noofclusters )
    assert noofclusters < len(vectors )
    # Find out the dimensionality
    dim = len(vectors[0] )
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors ) ) )
    shuffle(vector_indices )
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]] ) for i in range(noofclusters )
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder('''float64''' , [dim] )
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid , centroid_value ) )
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0 ) for i in range(len(vectors ) )]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder('''int32''' )
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment , assignment_value ) )
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder('''float''' , [None, dim] )
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input , 0 )
        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder('''float''' , [dim] )
        vb = tf.placeholder('''float''' , [dim] )
        # tf.sub was removed in TF 1.0; tf.subtract is the supported name
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(va , vb ) , 2 ) ) )
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder('''float''' , [noofclusters] )
        cluster_assignment = tf.argmin(centroid_distances , 0 )
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.global_variables_initializer()
        # Initialize all variables
        sess.run(init_op )
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations ):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors ) ):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist , feed_dict={va: vect, vb: sess.run(centroid )} )
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment , feed_dict={centroid_distances: distances} )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters ):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors ) )
                    if sess.run(assignments[i] ) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op , feed_dict={mean_input: array(assigned_vects )} )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
        # Return centroids and assignments
        centroids = sess.run(centroids )
        assignments = sess.run(assignments )
        return centroids, assignments
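# ---------------------------------------------------------------------------
# A hedged NumPy-only equivalent of the EM loop above, for readers who do not
# want the TF1 graph machinery; `kmeans_numpy` is an illustrative name.
import numpy as np

def kmeans_numpy(vectors, noofclusters, iterations=100, seed=0):
    rng = np.random.default_rng(seed)
    vectors = np.asarray(vectors, dtype=np.float64)
    centroids = vectors[rng.choice(len(vectors), noofclusters, replace=False)].copy()
    assignments = np.zeros(len(vectors), dtype=int)
    for _ in range(iterations):
        # Expectation: assign each vector to its nearest centroid.
        dists = np.linalg.norm(vectors[:, None, :] - centroids[None, :, :], axis=-1)
        assignments = dists.argmin(axis=1)
        # Maximization: move each centroid to the mean of its assigned vectors.
        for k in range(noofclusters):
            members = vectors[assignments == k]
            if len(members):
                centroids[k] = members.mean(axis=0)
    return centroids, assignments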
| 235
| 0
|
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _lowerCamelCase( PipelineTesterMixin, unittest.TestCase ):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        """latents""",
        """num_images_per_prompt""",
        """callback""",
        """callback_steps""",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    lowercase_ : Dict = False  # attribute name lost in this dump; kept verbatim
    def get_dummy_components( self) -> List[Any]:
        """simple docstring"""
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=True, activation_fn='gelu-approximate', num_embeds_ada_norm=10_00, norm_type='ada_norm_zero', norm_elementwise_affine=False, )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
        return components
    def get_dummy_inputs( self, device, seed=0) -> Dict:
        """simple docstring"""
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            """class_labels""": [1],
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs
    def test_inference( self) -> Optional[Any]:
        """simple docstring"""
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1E-3)
    def test_inference_batch_single_identical( self) -> Optional[Any]:
        """simple docstring"""
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1E-3)
    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
    def test_xformers_attention_forwardGenerator_pass( self) -> Any:
        """simple docstring"""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
@require_torch_gpu
@slow
class _lowerCamelCase( unittest.TestCase ):
    def tearDown( self) -> str:
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dit_256( self) -> Optional[Any]:
        """simple docstring"""
        generator = torch.manual_seed(0)
        pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256')
        pipe.to('cuda')
        words = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
        ids = pipe.get_label_ids(words)
        images = pipe(ids, generator=generator, num_inference_steps=40, output_type='np').images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''')
            assert np.abs((expected_image - image).max()) < 1E-2
    def test_dit_512( self) -> Optional[Any]:
        """simple docstring"""
        pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512')
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to('cuda')
        words = ["""vase""", """umbrella"""]
        ids = pipe.get_label_ids(words)
        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type='np').images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
                F'''/dit/{word}_512.npy''')
            assert np.abs((expected_image - image).max()) < 1E-1
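# ---------------------------------------------------------------------------
# Hedged usage sketch for the pipeline exercised above: DiT is
# class-conditional, so label strings are mapped to ImageNet class ids via
# get_label_ids before calling the pipeline.  This illustrative helper is not
# part of the original test suite and needs a CUDA device plus the
# facebook/DiT-XL-2-256 weights to run.
def _dit_demo():
    pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256')
    pipe.to('cuda')
    class_ids = pipe.get_label_ids(['white shark'])
    generator = torch.manual_seed(0)
    return pipe(class_ids, generator=generator, num_inference_steps=25, output_type='np').images[0]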
| 21
|
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
A_ = TypeVar('''T''')
A_ = Union[List[T], Tuple[T, ...]]
A_ = Union[T, List[T], Dict[str, T]]
A_ = Union[str, bytes, os.PathLike]
| 64
| 0
|
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class _SCREAMING_SNAKE_CASE :
    def __init__( self , scheduler , optimizers , step_with_optimizer = True , split_batches = False ) -> None:
        """simple docstring"""
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers , (list, tuple) ) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()
    def step( self , *args , **kwargs ) -> Any:
        """simple docstring"""
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args , **kwargs )
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args , **kwargs )
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes ):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler , "total_steps" ):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args , **kwargs )
                else:
                    self.scheduler.step(*args , **kwargs )
    def get_last_lr( self ) -> List[str]:
        """simple docstring"""
        return self.scheduler.get_last_lr()
    def state_dict( self ) -> str:
        """simple docstring"""
        return self.scheduler.state_dict()
    def load_state_dict( self , state_dict ) -> None:
        """simple docstring"""
        self.scheduler.load_state_dict(state_dict )
    def get_lr( self ) -> Tuple:
        """simple docstring"""
        return self.scheduler.get_lr()
    def print_lr( self , *args , **kwargs ) -> Union[str, Any]:
        """simple docstring"""
        return self.scheduler.print_lr(*args , **kwargs )
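# ---------------------------------------------------------------------------
# Minimal sketch (hypothetical numbers) of the rationale in the comments
# above: without split_batches, each distributed optimizer step consumes
# num_processes batches, so the LR schedule is advanced num_processes times
# to stay aligned with the single-process schedule.
if __name__ == "__main__":
    import torch

    model = torch.nn.Linear(4, 4)
    opt = torch.optim.SGD(model.parameters(), lr=1.0)
    sched = torch.optim.lr_scheduler.StepLR(opt, step_size=10, gamma=0.1)
    num_processes = 2
    for _ in range(5):  # 5 distributed steps stand in for 10 single-process steps
        opt.step()
        for _ in range(num_processes):
            sched.step()
    assert sched.last_epoch == 10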
| 353
|
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__( self , img , dst_width , dst_height ) -> None:
        if dst_width <= 0 or dst_height <= 0:
            raise ValueError("Destination width/height should be > 0" )
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3) , np.uint8 ) * 255
        )
    def process( self ) -> None:
        for i in range(self.dst_h ):
            for j in range(self.dst_w ):
                self.output[i][j] = self.img[self.get_y(i )][self.get_x(j )]
    def get_x( self , x ) -> int:
        return int(self.ratio_x * x )
    def get_y( self , y ) -> int:
        return int(self.ratio_y * y )
if __name__ == "__main__":
    dst_w, dst_h = 8_0_0, 6_0_0
    im = imread('''image_data/lena.jpg''', 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F"""Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}""", n.output
)
waitKey(0)
destroyAllWindows()
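# ---------------------------------------------------------------------------
# A hedged vectorized alternative to the per-pixel loops above: precompute the
# floored source indices once per axis and gather with fancy indexing.
# `resize_nearest` is an illustrative name, not part of the original module.
def resize_nearest(img, dst_w, dst_h):
    xs = (np.arange(dst_w) * (img.shape[1] / dst_w)).astype(int)
    ys = (np.arange(dst_h) * (img.shape[0] / dst_h)).astype(int)
    return img[ys[:, None], xs[None, :]]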
| 47
| 0
|
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowercase__ : Tuple = logging.get_logger(__name__)
class UpperCamelCase__ ( BaseImageProcessor ):
"""simple docstring"""
    model_input_names = ["""pixel_values"""]
def __init__( self : str , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Union[int, float] = 1 / 2_5_5 , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , SCREAMING_SNAKE_CASE_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **SCREAMING_SNAKE_CASE_ : str , ):
super().__init__(**__UpperCamelCase )
lowerCAmelCase_ : Dict = size if size is not None else {'shortest_edge': 2_2_4}
lowerCAmelCase_ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
lowerCAmelCase_ : Dict = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
lowerCAmelCase_ : Any = get_size_dict(__UpperCamelCase , param_name='crop_size' )
lowerCAmelCase_ : List[Any] = do_resize
lowerCAmelCase_ : List[Any] = size
lowerCAmelCase_ : Tuple = resample
lowerCAmelCase_ : Any = do_center_crop
lowerCAmelCase_ : List[str] = crop_size
lowerCAmelCase_ : int = do_rescale
lowerCAmelCase_ : List[str] = rescale_factor
lowerCAmelCase_ : List[Any] = do_normalize
lowerCAmelCase_ : str = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowerCAmelCase_ : Dict = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Dict[str, int] , SCREAMING_SNAKE_CASE_ : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Dict , ):
lowerCAmelCase_ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
lowerCAmelCase_ : str = int((2_5_6 / 2_2_4) * size['shortest_edge'] )
lowerCAmelCase_ : Any = get_resize_output_image_size(__UpperCamelCase , size=__UpperCamelCase , default_to_square=__UpperCamelCase )
lowerCAmelCase_ : Any = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F"Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}" )
return resize(
__UpperCamelCase , size=(size_dict['height'], size_dict['width']) , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Dict[str, int] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : List[Any] , ):
lowerCAmelCase_ : Tuple = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size dict must have keys \'height\' and \'width\'. Got {size.keys()}" )
return center_crop(__UpperCamelCase , size=(size['height'], size['width']) , data_format=__UpperCamelCase , **__UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Union[int, float] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Dict , ):
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Union[float, List[float]] , SCREAMING_SNAKE_CASE_ : Union[float, List[float]] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Optional[Any] , ):
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , SCREAMING_SNAKE_CASE_ : ImageInput , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[Dict[str, int]] = None , SCREAMING_SNAKE_CASE_ : PILImageResampling = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[Dict[str, int]] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[float] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, Iterable[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, Iterable[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[TensorType] = None , SCREAMING_SNAKE_CASE_ : ChannelDimension = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ : int , ):
lowerCAmelCase_ : Optional[int] = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ : Dict = resample if resample is not None else self.resample
lowerCAmelCase_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase_ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase_ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase_ : List[str] = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase_ : List[str] = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase_ : Dict = image_std if image_std is not None else self.image_std
lowerCAmelCase_ : str = size if size is not None else self.size
lowerCAmelCase_ : List[str] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
lowerCAmelCase_ : Optional[Any] = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase_ : Optional[Any] = get_size_dict(__UpperCamelCase , param_name='crop_size' )
lowerCAmelCase_ : Dict = make_list_of_images(__UpperCamelCase )
if not valid_images(__UpperCamelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCAmelCase_ : Dict = [to_numpy_array(__UpperCamelCase ) for image in images]
if do_resize:
lowerCAmelCase_ : Dict = [self.resize(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) for image in images]
if do_center_crop:
lowerCAmelCase_ : List[str] = [self.center_crop(__UpperCamelCase , __UpperCamelCase ) for image in images]
if do_rescale:
lowerCAmelCase_ : Optional[Any] = [self.rescale(__UpperCamelCase , __UpperCamelCase ) for image in images]
if do_normalize:
lowerCAmelCase_ : Any = [self.normalize(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) for image in images]
lowerCAmelCase_ : List[Any] = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images]
lowerCAmelCase_ : Dict = {'pixel_values': images}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
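# ---------------------------------------------------------------------------
# Worked check of the shortest-edge branch above: the requested edge is
# scaled by 256/224 before resizing, the classic "resize to 256, then
# center-crop to 224" ImageNet recipe.
if __name__ == "__main__":
    shortest_edge = 2_2_4
    resize_edge = int((2_5_6 / 2_2_4) * shortest_edge)
    assert resize_edge == 2_5_6  # resized to 256, then center-cropped back to 224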
| 224
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class UpperCAmelCase_ ( SequenceFeatureExtractor):
    model_input_names = ['''input_values''', '''padding_mask''']
    def __init__( self , feature_size: int = 1 , sampling_rate: int = 2_4000 , padding_value: float = 0.0 , chunk_length_s: float = None , overlap: float = None , **kwargs , ) -> None:
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
    @property
    def chunk_length( self ) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )
    @property
    def chunk_stride( self ) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
    def __call__( self , raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , padding: Optional[Union[bool, str, PaddingStrategy]] = None , truncation: Optional[bool] = False , max_length: Optional[int] = None , return_tensors: Optional[Union[str, TensorType]] = None , sampling_rate: Optional[int] = None , ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
                    F''' {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        if padding and truncation:
            raise ValueError('''Both padding and truncation were set. Make sure you only set one.''' )
        elif padding is None:
            # by default let's pad the inputs
            padding = True
        is_batched = bool(
            isinstance(raw_audio , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
        if is_batched:
            raw_audio = [np.asarray(audio , dtype=np.float32 ).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio , np.ndarray ):
            raw_audio = np.asarray(raw_audio , dtype=np.float32 )
        elif isinstance(raw_audio , np.ndarray ) and raw_audio.dtype is np.dtype(np.float64 ):
            raw_audio = raw_audio.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio ).T]
        # verify inputs are valid
        for idx, example in enumerate(raw_audio ):
            if example.ndim > 2:
                raise ValueError(F'''Expected input shape (channels, length) but got shape {example.shape}''' )
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(F'''Expected mono audio but example has {example.shape[-1]} channels''' )
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(F'''Expected stereo audio but example has {example.shape[-1]} channels''' )
        padded_inputs = None
        input_values = BatchFeature({'''input_values''': raw_audio} )
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio )
                nb_step = int(np.floor(max_length / self.chunk_stride ) )
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio )
                nb_step = int(np.ceil(max_length / self.chunk_stride ) )
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = '''max_length'''
        else:
            padded_inputs = input_values
        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values , max_length=max_length , truncation=truncation , padding=padding , return_attention_mask=padding , )
        if padding:
            padded_inputs['''padding_mask'''] = padded_inputs.pop('''attention_mask''' )
        input_values = []
        for example in padded_inputs.pop('''input_values''' ):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T )
        padded_inputs['''input_values'''] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
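# ---------------------------------------------------------------------------
# Worked example (hypothetical values) of the chunking arithmetic above:
if __name__ == "__main__":
    chunk_length_s, overlap, sampling_rate = 1.0, 0.25, 2_4000
    chunk_length = int(chunk_length_s * sampling_rate )            # 24000 samples
    chunk_stride = max(1 , int((1.0 - overlap) * chunk_length ) )  # 18000 samples
    audio_len = 6_0000
    nb_step = int(np.ceil(audio_len / chunk_stride ) )             # 4 (padding branch)
    max_length = (nb_step - 1) * chunk_stride + chunk_length       # pad target
    assert max_length == 7_8000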
| 256
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UpperCAmelCase : List[Any] = {
"BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "altclip_text_model"
    def __init__( self , vocab_size=25_00_02 , hidden_size=10_24 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=40_96 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_14 , type_vocab_size=1 , initializer_range=0.02 , initializer_factor=0.02 , layer_norm_eps=1e-0_5 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , project_dim=7_68 , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "altclip_vision_model"
    def __init__( self , hidden_size=7_68 , intermediate_size=30_72 , projection_dim=5_12 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=2_24 , patch_size=32 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.02 , initializer_factor=1.0 , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("""model_type""" ) == "altclip":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class AltCLIPConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "altclip"
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , projection_dim=7_68 , logit_scale_init_value=2.6592 , **kwargs ) -> None:
        '''simple docstring'''
        text_config_dict = kwargs.pop("""text_config_dict""" , None )
        vision_config_dict = kwargs.pop("""vision_config_dict""" , None )
        super().__init__(**kwargs )
        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}
            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict ).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            F'`{key}` is found in both `text_config_dict` and `text_config` but with different values. '
                            F'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            F'`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '
                            F'value `text_config["{key}"]` will be overridden.'
                        )
                    logger.warning(message )
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict )
        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}
            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict ).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["""id2label"""] = {
                    str(key ): value for key, value in _vision_config_dict["""id2label"""].items()
                }
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            F'`{key}` is found in both `vision_config_dict` and `vision_config` but with different '
                            F'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            F'`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '
                            F'The value `vision_config["{key}"]` will be overridden.'
                        )
                    logger.warning(message )
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict )
        if text_config is None:
            text_config = {}
            logger.info("""`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.""" )
        if vision_config is None:
            vision_config = {}
            logger.info("""`vision_config` is `None`. Initializing the `AltCLIPVisionConfig` with default values.""" )
        self.text_config = AltCLIPTextConfig(**text_config )
        self.vision_config = AltCLIPVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
    @classmethod
    def from_text_vision_configs( cls , text_config , vision_config , **kwargs ) -> "AltCLIPConfig":
        '''simple docstring'''
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ) -> Dict:
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["""text_config"""] = self.text_config.to_dict()
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
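# ---------------------------------------------------------------------------
# Hedged composition sketch for the configs above: build the two towers
# separately, then couple them through the composite config.
if __name__ == "__main__":
    text_cfg = AltCLIPTextConfig(hidden_size=10_24 , num_hidden_layers=24 )
    vision_cfg = AltCLIPVisionConfig(image_size=2_24 , patch_size=32 )
    cfg = AltCLIPConfig.from_text_vision_configs(text_cfg , vision_cfg , projection_dim=7_68 )
    assert cfg.text_config.hidden_size == 10_24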
| 66
|
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def __lowerCamelCase ( file , sock ):
    '''simple docstring'''
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None] )
    file.return_value.__enter__.return_value.read.side_effect = lambda _ : next(f )
    # ===== invoke =====
    send_file(filename="""mytext.txt""" , testing=True )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
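# ---------------------------------------------------------------------------
# A hedged reconstruction of the function under test, inferred from the
# assertions above (bind -> listen -> accept -> recv, then send file chunks
# until read() returns a falsy value).  The port number is an assumption.
# Kept commented out so it does not shadow the real import above.
#
# import socket
#
# def send_file(filename: str = "mytext.txt", testing: bool = False) -> None:
#     sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     sock.bind(("localhost", 12312))
#     sock.listen(1)
#     conn, _ = sock.accept()
#     conn.recv(1024)                # wait for the client's request
#     with open(filename, "rb") as in_file:
#         data = in_file.read(1024)
#         while data:
#             conn.send(data)        # called once with the mocked read() above
#             data = in_file.read(1024)
#     conn.close()
#     sock.shutdown(socket.SHUT_RDWR)
#     sock.close()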
| 66
| 1
|
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Optional[int] = LxmertConfig.from_json_file(lowerCamelCase__ )
print(F"""Building PyTorch model from configuration: {config}""" )
lowercase__ : Any = LxmertForPreTraining(lowerCamelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , lowerCamelCase__ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
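# Example invocation (paths are placeholders; the script name is assumed from
# the usual transformers conversion-script convention):
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./model.ckpt \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin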
| 130
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
    '''b0''': efficientnet.EfficientNetB0,
    '''b1''': efficientnet.EfficientNetB1,
    '''b2''': efficientnet.EfficientNetB2,
    '''b3''': efficientnet.EfficientNetB3,
    '''b4''': efficientnet.EfficientNetB4,
    '''b5''': efficientnet.EfficientNetB5,
    '''b6''': efficientnet.EfficientNetB6,
    '''b7''': efficientnet.EfficientNetB7,
}
lowerCAmelCase__ = {
'''b0''': {
'''hidden_dim''': 1_2_8_0,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 2_2_4,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_2_8_0,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 2_4_0,
'''dropout_rate''': 0.2,
'''dw_padding''': [1_6],
},
'''b2''': {
'''hidden_dim''': 1_4_0_8,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 2_6_0,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 1_6],
},
'''b3''': {
'''hidden_dim''': 1_5_3_6,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 3_0_0,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 1_8],
},
'''b4''': {
'''hidden_dim''': 1_7_9_2,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 3_8_0,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_0_4_8,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 4_5_6,
'''dropout_rate''': 0.4,
'''dw_padding''': [1_3, 2_7],
},
'''b6''': {
'''hidden_dim''': 2_3_0_4,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 5_2_8,
'''dropout_rate''': 0.5,
'''dw_padding''': [3_1],
},
'''b7''': {
'''hidden_dim''': 2_5_6_0,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 6_0_0,
'''dropout_rate''': 0.5,
'''dw_padding''': [1_8],
},
}
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Dict = EfficientNetConfig()
lowercase__ : int = CONFIG_MAP[model_name]["hidden_dim"]
lowercase__ : Any = CONFIG_MAP[model_name]["width_coef"]
lowercase__ : Optional[Any] = CONFIG_MAP[model_name]["depth_coef"]
lowercase__ : List[str] = CONFIG_MAP[model_name]["image_size"]
lowercase__ : List[Any] = CONFIG_MAP[model_name]["dropout_rate"]
lowercase__ : Optional[int] = CONFIG_MAP[model_name]["dw_padding"]
lowercase__ : Optional[int] = "huggingface/label-files"
lowercase__ : Any = "imagenet-1k-id2label.json"
lowercase__ : List[Any] = 1_000
lowercase__ : List[Any] = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
lowercase__ : List[str] = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
lowercase__ : List[str] = idalabel
lowercase__ : Dict = {v: k for k, v in idalabel.items()}
return config
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ : str = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
return im
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Tuple = CONFIG_MAP[model_name]["image_size"]
lowercase__ : Optional[int] = EfficientNetImageProcessor(
size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=lowerCamelCase__ , )
return preprocessor
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Tuple = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
lowercase__ : Any = sorted(set(lowerCamelCase__ ) )
lowercase__ : List[Any] = len(lowerCamelCase__ )
lowercase__ : Dict = {b: str(lowerCamelCase__ ) for b, i in zip(lowerCamelCase__ , range(lowerCamelCase__ ) )}
lowercase__ : List[str] = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
lowercase__ : Tuple = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
lowercase__ : List[str] = {}
for item in rename_keys:
if item[0] in original_param_names:
lowercase__ : Tuple = "efficientnet." + item[1]
lowercase__ : Union[str, Any] = "classifier.weight"
lowercase__ : List[Any] = "classifier.bias"
return key_mapping
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
for key, value in tf_params.items():
if "normalization" in key:
continue
lowercase__ : List[str] = key_mapping[key]
if "_conv" in key and "kernel" in key:
lowercase__ : int = torch.from_numpy(lowerCamelCase__ ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
lowercase__ : Any = torch.from_numpy(lowerCamelCase__ ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
lowercase__ : List[Any] = torch.from_numpy(np.transpose(lowerCamelCase__ ) )
else:
lowercase__ : Tuple = torch.from_numpy(lowerCamelCase__ )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowerCamelCase__ )
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """Copy/paste/tweak the original TF model's weights into our EfficientNet structure."""
    # Load the original TF model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        hub_model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(hub_model_name)
        hf_model.push_to_hub(hub_model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
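
# Example invocation (the script filename is an assumption; the flags mirror the
# argparse defaults above):
#
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model
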
| 130
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
    _import_structure["modeling_data2vec_text"] = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
    _import_structure["modeling_data2vec_vision"] = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
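
# Usage sketch (illustrative, not part of this file): because of _LazyModule,
# importing the package is cheap and the torch/TF submodules above are only
# pulled in on first attribute access, e.g.:
#
#   from transformers.models.data2vec import Data2VecAudioConfig  # no torch yet
#   config = Data2VecAudioConfig()  # triggers the real configuration import
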
| 95
|
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class UpperCamelCase ( unittest.TestCase ):
@slow
def _lowercase (self : List[str]) -> Dict:
__snake_case : Any = 'bert-base-cased'
__snake_case : Optional[Any] = AutoConfig.from_pretrained(_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
__snake_case : Union[str, Any] = TFAutoModel.from_pretrained(_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
@slow
def _lowercase (self : List[Any]) -> str:
__snake_case : Optional[int] = 'bert-base-cased'
__snake_case : List[Any] = AutoConfig.from_pretrained(_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
__snake_case : Dict = TFAutoModelForPreTraining.from_pretrained(_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
@slow
def _lowercase (self : Any) -> List[str]:
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : str = AutoConfig.from_pretrained(_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
__snake_case : List[str] = TFAutoModelForCausalLM.from_pretrained(_A)
__snake_case , __snake_case : List[str] = TFAutoModelForCausalLM.from_pretrained(_A , output_loading_info=_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
@slow
def _lowercase (self : Tuple) -> Dict:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Union[str, Any] = AutoConfig.from_pretrained(_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
__snake_case : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
@slow
def _lowercase (self : Union[str, Any]) -> Optional[int]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Union[str, Any] = AutoConfig.from_pretrained(_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
__snake_case : Optional[Any] = TFAutoModelForMaskedLM.from_pretrained(_A)
__snake_case , __snake_case : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(_A , output_loading_info=_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
@slow
def _lowercase (self : str) -> Union[str, Any]:
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Dict = AutoConfig.from_pretrained(_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
__snake_case : str = TFAutoModelForSeqaSeqLM.from_pretrained(_A)
__snake_case , __snake_case : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(_A , output_loading_info=_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
@slow
def _lowercase (self : str) -> str:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__snake_case : Tuple = AutoConfig.from_pretrained(_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
__snake_case : Tuple = TFAutoModelForSequenceClassification.from_pretrained(_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
@slow
def _lowercase (self : Optional[Any]) -> Optional[int]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__snake_case : List[str] = AutoConfig.from_pretrained(_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
__snake_case : Any = TFAutoModelForQuestionAnswering.from_pretrained(_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
@slow
@require_tensorflow_probability
def _lowercase (self : List[Any]) -> List[str]:
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
__snake_case : Optional[Any] = AutoConfig.from_pretrained(_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
__snake_case : int = TFAutoModelForTableQuestionAnswering.from_pretrained(_A)
__snake_case , __snake_case : Dict = TFAutoModelForTableQuestionAnswering.from_pretrained(
_A , output_loading_info=_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
def _lowercase (self : Optional[Any]) -> Optional[Any]:
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
self.assertIsInstance(_A , _A)
self.assertEqual(model.num_parameters() , 1_44_10)
self.assertEqual(model.num_parameters(only_trainable=_A) , 1_44_10)
def _lowercase (self : Any) -> List[str]:
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
self.assertIsInstance(_A , _A)
self.assertEqual(model.num_parameters() , 1_44_10)
self.assertEqual(model.num_parameters(only_trainable=_A) , 1_44_10)
def _lowercase (self : Optional[Any]) -> str:
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
__snake_case : Optional[Any] = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny')
self.assertIsInstance(_A , _A)
__snake_case : int = copy.deepcopy(model.config)
__snake_case : int = ['FunnelBaseModel']
__snake_case : int = TFAutoModel.from_config(_A)
self.assertIsInstance(_A , _A)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_A)
__snake_case : List[Any] = TFAutoModel.from_pretrained(_A)
self.assertIsInstance(_A , _A)
def _lowercase (self : List[Any]) -> int:
try:
AutoConfig.register('new-model' , _A)
__snake_case : int = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__):
# Wrong config class will raise an error
with self.assertRaises(_A):
auto_class.register(_A , _A)
auto_class.register(_A , _A)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A):
auto_class.register(_A , _A)
# Now that the config is registered, it can be used as any other config with the auto-API
__snake_case : Union[str, Any] = BertModelTester(self).get_config()
__snake_case : Optional[int] = NewModelConfig(**tiny_config.to_dict())
__snake_case : List[str] = auto_class.from_config(_A)
self.assertIsInstance(_A , _A)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_A)
__snake_case : Tuple = auto_class.from_pretrained(_A)
self.assertIsInstance(_A , _A)
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def _lowercase (self : Optional[int]) -> Union[str, Any]:
with self.assertRaisesRegex(
_A , 'bert-base is not a local folder and is not a valid model identifier'):
__snake_case : Any = TFAutoModel.from_pretrained('bert-base')
def _lowercase (self : str) -> str:
with self.assertRaisesRegex(
_A , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
__snake_case : Optional[Any] = TFAutoModel.from_pretrained(_A , revision='aaaaaa')
def _lowercase (self : int) -> Any:
with self.assertRaisesRegex(
_A , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
__snake_case : List[str] = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model')
def _lowercase (self : Optional[Any]) -> Any:
with self.assertRaisesRegex(_A , 'Use `from_pt=True` to load this model'):
__snake_case : List[str] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only')
def _lowercase (self : str) -> Any:
# Make sure we have cached the model.
__snake_case : str = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert')
with RequestCounter() as counter:
__snake_case : List[str] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert')
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
# With a sharded checkpoint
__snake_case : Optional[int] = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded')
with RequestCounter() as counter:
__snake_case : Any = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded')
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
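
# The register test above boils down to this flow (a sketch with the test's own
# names; TFNewModel stands in for any user-defined model class):
#
#   AutoConfig.register("new-model", NewModelConfig)
#   TFAutoModel.register(NewModelConfig, TFNewModel)
#   model = TFAutoModel.from_config(NewModelConfig())  # resolves to TFNewModel
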
| 95
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    """Build a DetrConfig (and panoptic flag) from the model name."""
    # initialize config based on the ResNet backbone in the name
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
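
# For instance (illustrative): "detr-resnet-50" yields a 91-label COCO
# detection config, while a "-panoptic" suffix flips is_panoptic and uses 250:
#
#   config, is_panoptic = get_detr_config("detr-resnet-50")
#   assert config.num_labels == 91 and not is_panoptic
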
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
A__ = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') )
rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') )
rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') )
rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') )
rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
] )
return rename_keys
def rename_key(state_dict, old, new):
    """Pop a key from the state dict and re-insert it under its new name."""
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split the fused in_proj q/k/v matrices into separate projections."""
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
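
# The hard-coded slices above rely on DETR's hidden size d_model = 256:
# PyTorch's MultiheadAttention keeps Q, K and V stacked in one (3*256, 256)
# in_proj_weight, so rows [:256], [256:512] and [-256:] are the query, key and
# value projections respectively (an assumption that holds for the configs
# this script converts).
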
def prepare_img():
    """Download an image of cute cats to verify the conversion on."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original model's weights to our DETR structure."""
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val

    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)

    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="detr-resnet-50",
type=str,
choices=["detr-resnet-50", "detr-resnet-101"],
help="Name of the DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
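
# Example invocation (the script filename is an assumption; the flags mirror
# the argparse definitions above):
#
#   python convert_detr_to_pytorch.py --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./detr-resnet-50
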
| 221
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_chinese_clip": [
"CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ChineseCLIPConfig",
"ChineseCLIPOnnxConfig",
"ChineseCLIPTextConfig",
"ChineseCLIPVisionConfig",
],
"processing_chinese_clip": ["ChineseCLIPProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
"CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ChineseCLIPModel",
"ChineseCLIPPreTrainedModel",
"ChineseCLIPTextModel",
"ChineseCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 221
| 1
|
from . import (
    albert,
    align,
    altclip,
    audio_spectrogram_transformer,
    auto,
    autoformer,
    bark,
    bart,
    barthez,
    bartpho,
    beit,
    bert,
    bert_generation,
    bert_japanese,
    bertweet,
    big_bird,
    bigbird_pegasus,
    biogpt,
    bit,
    blenderbot,
    blenderbot_small,
    blip,
    blip_2,
    bloom,
    bridgetower,
    byt5,
    camembert,
    canine,
    chinese_clip,
    clap,
    clip,
    clipseg,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    convnextv2,
    cpm,
    cpmant,
    ctrl,
    cvt,
    data2vec,
    deberta,
    deberta_v2,
    decision_transformer,
    deformable_detr,
    deit,
    deprecated,
    deta,
    detr,
    dialogpt,
    dinat,
    distilbert,
    dit,
    donut,
    dpr,
    dpt,
    efficientformer,
    efficientnet,
    electra,
    encodec,
    encoder_decoder,
    ernie,
    ernie_m,
    esm,
    falcon,
    flaubert,
    flava,
    fnet,
    focalnet,
    fsmt,
    funnel,
    git,
    glpn,
    gpt2,
    gpt_bigcode,
    gpt_neo,
    gpt_neox,
    gpt_neox_japanese,
    gpt_sw3,
    gptj,
    gptsan_japanese,
    graphormer,
    groupvit,
    herbert,
    hubert,
    ibert,
    imagegpt,
    informer,
    instructblip,
    jukebox,
    layoutlm,
    layoutlmv2,
    layoutlmv3,
    layoutxlm,
    led,
    levit,
    lilt,
    llama,
    longformer,
    longt5,
    luke,
    lxmert,
    m2m_100,
    marian,
    markuplm,
    mask2former,
    maskformer,
    mbart,
    mbart50,
    mega,
    megatron_bert,
    megatron_gpt2,
    mgp_str,
    mluke,
    mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
    mpnet,
    mra,
    mt5,
    musicgen,
    mvp,
    nat,
    nezha,
    nllb,
    nllb_moe,
    nystromformer,
    oneformer,
    open_llama,
    openai,
    opt,
    owlvit,
    pegasus,
    pegasus_x,
    perceiver,
    phobert,
    pix2struct,
    plbart,
    poolformer,
    prophetnet,
    qdqbert,
    rag,
    realm,
    reformer,
    regnet,
    rembert,
    resnet,
    roberta,
    roberta_prelayernorm,
    roc_bert,
    roformer,
    rwkv,
    sam,
    segformer,
    sew,
    sew_d,
    speech_encoder_decoder,
    speech_to_text,
    speech_to_text_2,
    speecht5,
    splinter,
    squeezebert,
    swiftformer,
    swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
    table_transformer,
    tapas,
    time_series_transformer,
    timesformer,
    timm_backbone,
    transfo_xl,
    trocr,
    tvlt,
    umt5,
    unispeech,
    unispeech_sat,
    upernet,
    videomae,
    vilt,
    vision_encoder_decoder,
    vision_text_dual_encoder,
    visual_bert,
    vit,
    vit_hybrid,
    vit_mae,
    vit_msn,
    vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
    wavlm,
    whisper,
    x_clip,
    xglm,
    xlm,
    xlm_prophetnet,
    xlm_roberta,
    xlm_roberta_xl,
    xlnet,
    xmod,
    yolos,
    yoso,
)
| 151
|
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
lowercase : Any = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
lowercase : str = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
lowercase : str = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
lowercase : List[str] = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
lowercase : List[Any] = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores."""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
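
# Worked example of the unbiased pass@k estimator above (illustrative numbers):
# with n = 5 samples of which c = 2 pass and k = 1,
#   pass@1 = 1 - (1 - 1/4) * (1 - 1/5) = 1 - 0.6 = 0.4, i.e. exactly c/n.
#
#   estimate_pass_at_k([5], [2], 1)   # -> array([0.4])
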
| 151
| 1
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
"BridgeTower/bridgetower-base-itm-mlm": (
"https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
),
}
class BridgeTowerVisionConfig(PretrainedConfig):
    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerTextConfig(PretrainedConfig):
    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerConfig(PretrainedConfig):
    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        # Deprecated aliases are dropped here so they never reach PretrainedConfig
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
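
# Composition sketch (default values; illustrative only):
#
#   text_config = BridgeTowerTextConfig()
#   vision_config = BridgeTowerVisionConfig()
#   config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)
#   config.to_dict()  # nests both sub-configs under "text_config"/"vision_config"
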
| 85
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
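
# Usage sketch (assumes network access to the EleutherAI checkpoint on the Hub):
#
#   tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#   ids = tokenizer("Hello world").input_ids
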
| 85
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
_lowerCAmelCase : Dict = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 364
|
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class PixaStructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =PixaStructImageProcessor if is_vision_available() else None
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : int = PixaStructImageProcessingTester(self )
@property
def __a ( self : Optional[int] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) )
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.image_processor_tester.prepare_dummy_image()
UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
UpperCAmelCase__ : Dict = 2_0_4_8
UpperCAmelCase__ : int = image_processor(snake_case__ , return_tensors="pt" , max_patches=snake_case__ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def __a ( self : List[Any] ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : List[Any] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : str = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a ( self : List[Any] ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
UpperCAmelCase__ : Optional[int] = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(snake_case__ ):
UpperCAmelCase__ : List[Any] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
UpperCAmelCase__ : Optional[Any] = "Hello"
UpperCAmelCase__ : int = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : Dict = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input: the alpha channel is dropped, hence num_channels - 1
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
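# A small illustrative sketch (not part of the test suite above) of the
# hidden-dim arithmetic these tests rely on: Pix2Struct flattens each patch
# into its raw pixel values and prepends two extra slots for the patch's row
# and column index, hence (patch_height * patch_width * num_channels) + 2.
def expected_flattened_patch_dim(patch_height: int, patch_width: int, num_channels: int) -> int:
    # pixel values of one patch, plus one slot each for the row id and column id
    return patch_height * patch_width * num_channels + 2


assert expected_flattened_patch_dim(16, 16, 3) == 770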
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on
class MBart50Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (string) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source language setting: prefix=[src_lang_code], suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting: prefix=[tgt_lang_code], suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
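# Hedged usage sketch for the tokenizer above. In the transformers library
# this class is exported as MBart50Tokenizer; the checkpoint name comes from
# PRETRAINED_VOCAB_FILES_MAP above. Requires network access to run.
def _mbart50_usage_example():
    from transformers import MBart50Tokenizer

    tokenizer = MBart50Tokenizer.from_pretrained(
        "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
    )
    # input_ids are prefixed with the en_XX language code and suffixed with
    # </s>, mirroring set_src_lang_special_tokens above.
    return tokenizer("Hello world", return_tensors="pt")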
def surface_area(edge: float) -> float:
    """Return the surface area of a regular dodecahedron with the given edge length."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def volume(edge: float) -> float:
    """Return the volume of a regular dodecahedron with the given edge length."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
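# Quick sanity check (a sketch, not from the original file): with unit edge
# the formulas above reduce to the textbook constants for a regular
# dodecahedron, 3 * sqrt(25 + 10 * sqrt(5)) and (15 + 7 * sqrt(5)) / 4.
def _sanity_check() -> None:
    assert abs(surface_area(1) - 20.645728807067603) < 1e-9
    assert abs(volume(1) - 7.663118960624632) < 1e-9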
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations
def find_max(nums: list[int], left: int, right: int) -> int:
    """Find the maximum of nums[left:right + 1] using divide and conquer."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range [left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range [mid + 1, right]
    return left_max if left_max >= right_max else right_max
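# Usage sketch for find_max above: the divide-and-conquer recursion halves the
# range at every call, so recursion depth is O(log n) while total work is O(n).
def _find_max_example() -> None:
    nums = [3, 1, 4, 1, 5, 9, 2, 6]
    assert find_max(nums, 0, len(nums) - 1) == 9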
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """Return the batch size that bounds the Parquet row group size, lowered for media/binary features."""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir,
            keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Writes the dataset as Parquet to a binary file handle; the caller opens and closes the handle."""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
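# Hedged usage sketch for the reader/writer pair above (class names as
# reconstructed in this file, mirroring the datasets library's own API):
def _parquet_roundtrip_example(tmp_dir: str):
    from datasets import Dataset

    ds = Dataset.from_dict({"text": ["a", "b", "c"]})
    # Write the dataset to a Parquet file, then read it back as a map-style dataset.
    ParquetDatasetWriter(ds, f"{tmp_dir}/data.parquet").write()
    return ParquetDatasetReader(f"{tmp_dir}/data.parquet").read()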
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None
@property
def snake_case_ (self ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a single random PIL image."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predictions = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predictions)
        decoded_tok = tokenizer.batch_decode(predictions)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(
            list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"]
        )
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"
class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (string) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
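# Hedged sketch of the special-token layout the three sequence-building
# methods above agree on (the RoBERTa/CamemBERT convention BARThez reuses):
# a single sequence is "<s> A </s>", a pair is "<s> A </s></s> B </s>".
# Ids 0 and 2 are <s> and </s> per fairseq_tokens_to_ids in __init__.
def _barthez_layout_example():
    ids_a, ids_b = [10, 11], [20]
    single = [0] + ids_a + [2]
    pair = [0] + ids_a + [2, 2] + ids_b + [2]
    return single, pair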
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
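# Hedged example invocation (the script filename and paths are illustrative;
# every flag used here is defined by the parser above):
#
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned.ckpt \
#       --original_config_file ./v1-inference.yaml \
#       --scheduler_type pndm \
#       --extract_ema \
#       --half \
#       --dump_path ./stable-diffusion-v1-5-diffusers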
"""simple docstring"""
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line: str) -> str:
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code: str, indent_level: str = "", start_prompt: str = None, end_prompt: str = None):
    """Split `code` into indented blocks at `indent_level`, optionally between `start_prompt` and `end_prompt`."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wraps a `key` (that maps an object to a string) to lower case and remove underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort; `key` optionally maps an object to a str."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement: str) -> str:
    """Return the same `import_statement` but with its objects properly sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file: str, check_only: bool = True):
    """Sort `_import_structure` imports in `file`; `check_only` determines if we only check or overwrite."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))

    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
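# Illustrative sketch (hypothetical input, not from the repo) of what
# sort_objects_in_import does to a one-line entry: constants sort first,
# then classes, then functions.
#
#   sort_objects_in_import('_import_structure["models.bert"] = ["BertModel", "BERT_CONSTANT", "load_bert"]')
#   # -> '_import_structure["models.bert"] = ["BERT_CONSTANT", "BertModel", "load_bert"]'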
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        self.thread_ts = None

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
@property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
F' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'The following examples had failures:\n\n\n{report}\n',
},
}
@property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)
@staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )
    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure

                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
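# Hedged usage sketch for handle_test_results above (illustrative values: the
# tail of a pytest summary line such as the one stored in the "stats" artifact):
#
#   failed, success, time_spent = handle_test_results("== 4 failed, 102 passed in 0:01:02.33 ==")
#   assert (failed, success, time_spent) == (4, 102, "0:01:02.33")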
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_0979 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1e-5

    def test_full_loop_no_noise( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 258.9606 ) < 1e-2
        assert abs(result_mean.item() - 0.3372 ) < 1e-3
    def test_full_loop_with_v_prediction( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''' )
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 202.0296 ) < 1e-2
        assert abs(result_mean.item() - 0.2631 ) < 1e-3
    def test_custom_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_0_0, 8_7, 5_0, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps ):
            if i == len(scheduler_timesteps ) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep )
            prev_t = prev_t.item()
            self.assertEqual(prev_t , expected_prev_t )

    def test_custom_timesteps_increasing_order( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_0_0, 8_7, 5_0, 5_1, 0]
        with self.assertRaises(ValueError , msg='''`custom_timesteps` must be in descending order.''' ):
            scheduler.set_timesteps(timesteps=timesteps )

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_0_0, 8_7, 5_0, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )

    def test_custom_timesteps_too_large( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
            scheduler.set_timesteps(timesteps=timesteps )
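# Editor's sketch (assumption: diffusers and torch installed). A minimal version of
# the denoising loop the full-loop tests above exercise. The constant-zero "model
# output" is a stand-in for a real noise-prediction network, so the result is
# meaningless; this only demonstrates the DDPMScheduler API.
def _ddpm_scheduler_usage_sketch():
    import torch
    from diffusers import DDPMScheduler

    scheduler = DDPMScheduler(num_train_timesteps=1_000, beta_schedule="linear")
    scheduler.set_timesteps(50)
    sample = torch.randn(1, 3, 32, 32)
    for t in scheduler.timesteps:
        model_output = torch.zeros_like(sample)  # stand-in for model(sample, t)
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample.shape  # torch.Size([1, 3, 32, 32])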
| 235
| 0
|
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests( ModelTesterMixin , unittest.TestCase ):
    model_class = PriorTransformer
    main_input_name = """hidden_states"""

    @property
    def dummy_input( self):
        '''simple docstring'''
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input( self , seed=0):
        '''simple docstring'''
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    @property
    def input_shape( self):
        '''simple docstring'''
        return (4, 8)

    @property
    def output_shape( self):
        '''simple docstring'''
        return (4, 8)

    def prepare_init_args_and_inputs_for_common( self):
        '''simple docstring'''
        init_dict = {
            'num_attention_heads': 2,
            'attention_head_dim': 4,
            'num_layers': 2,
            'embedding_dim': 8,
            'num_embeddings': 7,
            'additional_embeddings': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub( self):
        '''simple docstring'''
        model , loading_info = PriorTransformer.from_pretrained(
            'hf-internal-testing/prior-dummy' , output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info['missing_keys']) , 0)
        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]
        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature( self):
        '''simple docstring'''
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ['hidden_states', 'timestep']
        self.assertListEqual(arg_names[:2] , expected_arg_names)

    def test_output_pretrained( self):
        '''simple docstring'''
        model = PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy')
        model = model.to(torch_device)
        if hasattr(model , 'set_default_attn_processor'):
            model.set_default_attn_processor()
        seed_input = self.get_dummy_seed_input()
        with torch.no_grad():
            output = model(**seed_input)[0]
        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1e-2))
@slow
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self , lowercase=1 , lowercase=7_68 , lowercase=77 , lowercase=0) -> int:
'''simple docstring'''
torch.manual_seed(lowercase)
a__: Union[str, Any] = batch_size
a__: List[str] = embedding_dim
a__: str = num_embeddings
a__: Tuple = torch.randn((batch_size, embedding_dim)).to(lowercase)
a__: List[str] = torch.randn((batch_size, embedding_dim)).to(lowercase)
a__: str = torch.randn((batch_size, num_embeddings, embedding_dim)).to(lowercase)
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
[37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
# fmt: on
])
def lowerCamelCase_ ( self , lowercase , lowercase) -> str:
'''simple docstring'''
a__: Tuple = PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior')
model.to(lowercase)
a__: Optional[Any] = self.get_dummy_seed_input(seed=lowercase)
with torch.no_grad():
a__: Optional[int] = model(**lowercase)[0]
assert list(sample.shape) == [1, 7_68]
a__: List[str] = sample[0, :8].flatten().cpu()
print(lowercase)
a__: Union[str, Any] = torch.tensor(lowercase)
assert torch_all_close(lowercase , lowercase , atol=1e-3)
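# Editor's sketch (assumption: diffusers and torch installed). Instantiates the tiny
# PriorTransformer configuration used by the tests above and runs one forward pass;
# all tensor shapes mirror the dummy inputs defined in the test class.
def _prior_transformer_usage_sketch():
    import torch
    from diffusers import PriorTransformer

    model = PriorTransformer(
        num_attention_heads=2, attention_head_dim=4, num_layers=2,
        embedding_dim=8, num_embeddings=7, additional_embeddings=4,
    )
    sample = model(
        hidden_states=torch.randn(4, 8),
        timestep=2,
        proj_embedding=torch.randn(4, 8),
        encoder_hidden_states=torch.randn(4, 7, 8),
    )[0]
    return sample.shape  # torch.Size([4, 8])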
| 203
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests( unittest.TestCase ):
    @property
    def dummy_uncond_unet( self):
        '''simple docstring'''
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        return model

    @property
    def dummy_vq_model( self):
        '''simple docstring'''
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
        return model

    @property
    def dummy_text_encoder( self):
        '''simple docstring'''
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        return CLIPTextModel(config)

    def test_inference_uncond( self):
        '''simple docstring'''
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model
        ldm = LDMPipeline(unet=unet , vqvae=vae , scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ldm(generator=generator , num_inference_steps=2 , output_type='numpy').images
        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator , num_inference_steps=2 , output_type='numpy' , return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != 'mps' else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests( unittest.TestCase ):
    def test_inference_uncond( self):
        '''simple docstring'''
        ldm = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256')
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ldm(generator=generator , num_inference_steps=5 , output_type='numpy').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 2_56, 2_56, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != 'mps' else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
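# Editor's sketch (assumptions: diffusers installed, network access to the Hub, and
# enough memory for the checkpoint). The same unconditional generation call the
# slow test above performs, outside the test harness.
def _ldm_pipeline_usage_sketch():
    import torch
    from diffusers import LDMPipeline

    ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
    generator = torch.manual_seed(0)
    image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images[0]
    return image.shape  # (256, 256, 3)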
| 203
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_jukebox": [
"JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
"JukeboxConfig",
"JukeboxPriorConfig",
"JukeboxVQVAEConfig",
],
"tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
"JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"JukeboxModel",
"JukeboxPreTrainedModel",
"JukeboxVQVAE",
"JukeboxPrior",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
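# Editor's sketch: the general idea behind the lazy-init file above, shown with
# plain PEP 562 module-level __getattr__ instead of transformers' _LazyModule.
# The attribute-to-submodule mapping below is illustrative, not the real one.
import importlib

_LAZY_ATTRS = {"JukeboxConfig": ".configuration_jukebox"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        # Import the submodule only when the attribute is first requested.
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")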
| 133
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
"MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"MBartForCausalLM",
"MBartForConditionalGeneration",
"MBartForQuestionAnswering",
"MBartForSequenceClassification",
"MBartModel",
"MBartPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
"TFMBartForConditionalGeneration",
"TFMBartModel",
"TFMBartPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
"FlaxMBartForConditionalGeneration",
"FlaxMBartForQuestionAnswering",
"FlaxMBartForSequenceClassification",
"FlaxMBartModel",
"FlaxMBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 47
| 0
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'roberta-base': 5_1_2,
'roberta-large': 5_1_2,
'roberta-large-mnli': 5_1_2,
'distilroberta-base': 5_1_2,
'roberta-base-openai-detector': 5_1_2,
'roberta-large-openai-detector': 5_1_2,
}
class lowerCAmelCase_ ( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = RobertaTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("""type""" ) )
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = """post_processor"""
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["""sep"""] = tuple(state["""sep"""] )
            if "cls" in state:
                state["""cls"""] = tuple(state["""cls"""] )
            changes_to_apply = False
            if state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
                state["""add_prefix_space"""] = add_prefix_space
                changes_to_apply = True
            if state.get("""trim_offsets""" , trim_offsets ) != trim_offsets:
                state["""trim_offsets"""] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("""type""" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    def mask_token( self ):
        """simple docstring"""
        if self._mask_token is None:
            if self.verbose:
                logger.error("""Using mask_token, but it is not set yet.""" )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def mask_token( self , value ):
        """simple docstring"""
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ):
        """simple docstring"""
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )

    def _encode_plus( self , *args , **kwargs ):
        """simple docstring"""
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )

    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
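# Editor's sketch (assumption: network access for the checkpoint). Shows the
# special-token layout the two methods above produce: RoBERTa wraps one sequence
# as <s> A </s>, a pair as <s> A </s></s> B </s>, and the token type ids are all
# zeros because RoBERTa does not use them.
def _roberta_special_tokens_sketch():
    from transformers import RobertaTokenizerFast

    tok = RobertaTokenizerFast.from_pretrained("roberta-base")
    pair_ids = tok.build_inputs_with_special_tokens([1], [2])      # [0, 1, 2, 2, 2, 2]
    type_ids = tok.create_token_type_ids_from_sequences([1], [2])  # [0, 0, 0, 0, 0, 0]
    return pair_ids, type_ids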
| 61
|
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
UpperCAmelCase_ = logging.getLogger(__name__)
UpperCAmelCase_ = 'Hello world! cécé herlolip'
BertAbsConfig = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def convert_bertabs_checkpoints( path , pytorch_dump_folder_path ):
    '''simple docstring'''
    # Instantiate the authors' model with the pre-trained weights
    config = BertAbsConfig(
        temp_dir=""".""" , finetune_bert=False , large=False , share_emb=True , use_bert_emb=False , encoder="""bert""" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
    checkpoints = torch.load(path , lambda storage , loc : storage )
    original = AbsSummarizer(config , torch.device("""cpu""" ) , checkpoints )
    original.eval()
    new_model = BertAbsSummarizer(config , torch.device("""cpu""" ) )
    new_model.eval()
    # -------------------
    # Convert the weights
    # -------------------
    logging.info("""convert the model""" )
    new_model.bert.load_state_dict(original.bert.state_dict() )
    new_model.decoder.load_state_dict(original.decoder.state_dict() )
    new_model.generator.load_state_dict(original.generator.state_dict() )
    # ----------------------------------
    # Make sure the outputs are identical
    # ----------------------------------
    logging.info("""Make sure that the models' outputs are identical""" )
    tokenizer = BertTokenizer.from_pretrained("""bert-base-uncased""" )
    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("""This is sample éàalj'-.""" )
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids )) )
    encoder_input_ids = torch.tensor(encoder_input_ids ).unsqueeze(0 )
    decoder_input_ids = tokenizer.encode("""This is sample 3 éàalj'-.""" )
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids )) )
    decoder_input_ids = torch.tensor(decoder_input_ids ).unsqueeze(0 )
    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src , tgt , segs , clss , mask_src , mask_tgt , mask_cls )[0]
    output_original_generator = original.generator(output_original_model )
    output_converted_model = new_model(
        encoder_input_ids , decoder_input_ids , token_type_ids , encoder_attention_mask , decoder_attention_mask )[0]
    output_converted_generator = new_model.generator(output_converted_model )
    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print("""Maximum absolute difference between outputs: {:.2f}""".format(maximum_absolute_difference ) )
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print("""Maximum absolute difference between outputs: {:.2f}""".format(maximum_absolute_difference ) )
    are_identical = torch.allclose(output_converted_model , output_original_model , atol=1e-3 )
    if are_identical:
        logging.info("""all weights are equal up to 1e-3""" )
    else:
        raise ValueError("""the weights are different. The new model is likely different from the original one.""" )
    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("""saving the model's state dictionary""" )
    torch.save(
        new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
UpperCAmelCase_ = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
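# Editor's sketch of the state_dict round trip the script's final comment is about:
# persisting only the weights is portable, while torch.save(model) pickles the
# class and is bound to the original directory layout. `TinyModel` is a
# hypothetical placeholder, not part of the conversion script.
import torch
from torch import nn

class TinyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(4, 4)

def _state_dict_roundtrip_sketch(path="tiny_model.bin"):
    model = TinyModel()
    torch.save(model.state_dict(), path)          # save weights only
    restored = TinyModel()                        # rebuild the architecture from code
    restored.load_state_dict(torch.load(path))    # then load the weights
    return restored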
| 61
| 1
|
"""simple docstring"""
class lowerCAmelCase_ :
    '''simple docstring'''
    def __init__( self , n ):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__( self ) -> int:
        return self.size

    def is_empty( self ) -> bool:
        return self.size == 0

    def first( self ):
        return False if self.is_empty() else self.array[self.front]

    def enqueue( self , data ):
        if self.size >= self.n:
            raise Exception('QUEUE IS FULL' )
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue( self ):
        if self.size == 0:
            raise Exception('UNDERFLOW' )
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
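# Editor's usage sketch for the fixed-size circular queue above (class name kept
# as in this file): enqueue fills slots modulo n, dequeue frees them in FIFO order.
def _circular_queue_sketch():
    queue = lowerCAmelCase_(3)
    queue.enqueue("a").enqueue("b")
    first_out = queue.dequeue()        # "a"
    queue.enqueue("c").enqueue("d")    # "d" reuses the slot freed by the dequeue
    return first_out, len(queue)       # ("a", 3)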
| 74
|
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(R'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'DecisionTransformerConfig',
'EncoderDecoderConfig',
'MusicgenConfig',
'RagConfig',
'SpeechEncoderDecoderConfig',
'TimmBackboneConfig',
'VisionEncoderDecoderConfig',
'VisionTextDualEncoderConfig',
'LlamaConfig',
}
def get_checkpoint_from_config_class( config_class ):
    """simple docstring"""
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class )
    checkpoints = _re_checkpoint.findall(config_source )
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith('''/''' ):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f'https://huggingface.co/{ckpt_name}'
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint
def check_config_docstrings_have_checkpoints():
    """simple docstring"""
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class )
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = '''\n'''.join(sorted(configs_without_checkpoint ) )
        raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
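# Editor's sketch, self-contained: what the checkpoint regex above extracts from a
# config docstring. The sample docstring text is made up for illustration.
def _checkpoint_regex_sketch():
    import re

    pattern = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
    doc = "Example: [bert-base-uncased](https://huggingface.co/bert-base-uncased)"
    return pattern.findall(doc)
    # -> [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]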
| 110
| 0
|
"""simple docstring"""
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    '''simple docstring'''
    if not sequence:
        return []
    if len(sequence ) == 1:
        return list(sequence )
    low = 0
    high = len(sequence ) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f'The elements inside the sequence must contain only {colors} values'
            raise ValueError(msg )
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : Union[str, Any] = input("""Enter numbers separated by commas:\n""").strip()
_lowerCAmelCase : Union[str, Any] = [int(item.strip()) for item in user_input.split(""",""")]
print(F"""{dutch_national_flag_sort(unsorted)}""")
| 364
|
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=1_8 , min_resolution=3_0 , max_resolution=4_0_0 , size=None , do_normalize=True , do_convert_rgb=True , patch_size=None , ):
        '''simple docstring'''
        size = size if size is not None else {"height": 2_0, "width": 2_0}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6]
        self.patch_size = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6}

    def prepare_image_processor_dict( self ):
        '''simple docstring'''
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image( self ):
        '''simple docstring'''
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url , stream=True ).raw ).convert("RGB" )
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class Pix2StructImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = Pix2StructImageProcessingTester(self )

    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , "do_normalize" ) )
        self.assertTrue(hasattr(image_processor , "do_convert_rgb" ) )
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.image_processor_tester.prepare_dummy_image()
UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
UpperCAmelCase__ : Dict = 2_0_4_8
UpperCAmelCase__ : int = image_processor(snake_case__ , return_tensors="pt" , max_patches=snake_case__ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def __a ( self : List[Any] ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : List[Any] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : str = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a ( self : List[Any] ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
UpperCAmelCase__ : Optional[int] = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(snake_case__ ):
UpperCAmelCase__ : List[Any] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
UpperCAmelCase__ : Optional[Any] = "Hello"
UpperCAmelCase__ : int = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : Dict = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a ( self : Dict ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : Dict = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : List[str] = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a ( self : Optional[int] ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : int = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : str = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = Pix2StructImageProcessingTester(self , num_channels=4 )
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , "do_normalize" ) )
        self.assertTrue(hasattr(image_processor , "do_convert_rgb" ) )
def __a ( self : int ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCAmelCase__ : str = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : Optional[int] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : Dict = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
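# Editor's sketch (assumptions: a transformers release with Pix2Struct support,
# torch>=1.11, and a PIL image passed in). The processor call the tests above
# cover: the image is cut into 16x16 patches, each flattened with its (row, col)
# index prepended, then padded or truncated to `max_patches`.
def _pix2struct_processor_sketch(image):
    from transformers import Pix2StructImageProcessor

    processor = Pix2StructImageProcessor()
    inputs = processor(images=image, return_tensors="pt", max_patches=2_048)
    return inputs.flattened_patches.shape  # (1, 2048, 16 * 16 * 3 + 2)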
| 298
| 0
|
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir('''fixtures''')
class ImageProcessorUtilTester( unittest.TestCase ):
    '''simple docstring'''

    def test_cached_files_are_used_when_internet_is_down( self):
        """simple docstring"""
        # A mock response for an HTTP request to emulate the server being down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        image_processor = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("""requests.Session.request""" , return_value=response_mock) as mock_head:
            image_processor = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""")
            # This check verifies that we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url( self):
        """simple docstring"""
        image_processor = ViTImageProcessor.from_pretrained(
            """https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""")

    def test_image_processor_from_pretrained_subfolder( self):
        """simple docstring"""
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""")
        config = AutoImageProcessor.from_pretrained(
            """hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""")
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester( unittest.TestCase ):
'''simple docstring'''
    @classmethod
    def setUpClass( cls):
        """simple docstring"""
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass( cls):
        """simple docstring"""
        try:
            delete_repo(token=cls._token , repo_id="""test-image-processor""")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""")
        except HTTPError:
            pass
    def test_push_to_hub( self):
        """simple docstring"""
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token)
        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k))
        # Reset repo
        delete_repo(token=self._token , repo_id="""test-image-processor""")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir , repo_id="""test-image-processor""" , push_to_hub=True , use_auth_token=self._token)
        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k))

    def test_push_to_hub_in_organization( self):
        """simple docstring"""
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token)
        new_image_processor = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k))
        # Reset repo
        delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=True , use_auth_token=self._token)
        new_image_processor = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k))

    def test_push_to_hub_dynamic_image_processor( self):
        """simple docstring"""
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor" , trust_remote_code=True)
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""")
| 272
|
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class a__( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp( self):
        """simple docstring"""
        super().setUp()
        vocab = [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """\u0120""",
            """\u0120l""",
            """\u0120n""",
            """\u0120lo""",
            """\u0120low""",
            """er""",
            """\u0120lowest""",
            """\u0120newer""",
            """\u0120wider""",
            """<unk>""",
        ]
        vocab_tokens = dict(zip(vocab , range(len(vocab))))
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""])
        with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
            fp.write(json.dumps(vocab_tokens) + """\n""")
        with open(self.merges_file , """w""" , encoding="""utf-8""") as fp:
            fp.write("""\n""".join(merges))

    def get_tokenizer( self , **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs)

    def get_rust_tokenizer( self , **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs)

    def get_input_output_texts( self , tokenizer):
        """simple docstring"""
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer( self):
        """simple docstring"""
        return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""")

    @cached_property
    def default_tokenizer_fast( self):
        """simple docstring"""
        return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""")
    @require_torch
    def test_prepare_batch( self):
        """simple docstring"""
        src_text = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , max_length=len(expected_src_tokens) , padding=True , return_tensors="""pt""")
            self.assertIsInstance(batch , BatchEncoding)
            self.assertEqual((2, 9) , batch.input_ids.shape)
            self.assertEqual((2, 9) , batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens , result)
            # Test that special tokens are reset
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors="""pt""")
# check if input_ids are returned and no labels
self.assertIn("""input_ids""" , __lowerCAmelCase)
self.assertIn("""attention_mask""" , __lowerCAmelCase)
self.assertNotIn("""labels""" , __lowerCAmelCase)
self.assertNotIn("""decoder_attention_mask""" , __lowerCAmelCase)
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(text_target=__lowerCAmelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""")
self.assertEqual(32 , targets["""input_ids"""].shape[1])
@require_torch
def a_ ( self):
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(
["""I am a small frog""" * 1024, """I am a small frog"""] , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors="""pt""")
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase)
self.assertEqual(batch.input_ids.shape , (2, 1024))
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ["""A long paragraph for summarization."""]
lowerCAmelCase = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(__lowerCAmelCase , text_target=__lowerCAmelCase , return_tensors="""pt""")
lowerCAmelCase = inputs["""input_ids"""]
lowerCAmelCase = inputs["""labels"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
def a_ ( self):
"""simple docstring"""
pass
def a_ ( self):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = """A, <mask> AllenNLP sentence."""
lowerCAmelCase = tokenizer_r.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase)
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""]) , sum(tokens_p["""token_type_ids"""]))
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""]) / len(tokens_r["""attention_mask"""]) , sum(tokens_p["""attention_mask"""]) / len(tokens_p["""attention_mask"""]) , )
lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""])
lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""])
                # Rust correctly handles the space before the mask while python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
self.assertSequenceEqual(
__lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
self.assertSequenceEqual(
__lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
| 272
| 1
|
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}


class BigBirdTokenizer(PreTrainedTokenizer):
    """Construct a BigBird tokenizer based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
def __init__( self , _a , _a="<unk>" , _a="<s>" , _a="</s>" , _a="<pad>" , _a="[SEP]" , _a="[MASK]" , _a="[CLS]" , _a = None , **_a , ):
"""simple docstring"""
lowerCamelCase = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else bos_token
lowerCamelCase = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else eos_token
lowerCamelCase = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else unk_token
lowerCamelCase = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else pad_token
lowerCamelCase = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else cls_token
lowerCamelCase = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a , eos_token=_a , unk_token=_a , pad_token=_a , sep_token=_a , mask_token=_a , cls_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
lowerCamelCase = vocab_file
lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )

    @property
    def vocab_size(self):
        """Size of the underlying SentencePiece vocabulary."""
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        """Return the vocabulary as a token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text) -> List[str]:
        """Tokenize a string into SentencePiece pieces."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens back into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using the sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        """Decode a list of ids into a string, mimicking the Rust tokenizer's spacing."""
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build the string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # no space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the SentencePiece vocabulary to `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add [CLS]/[SEP] special tokens around one or two sequences."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Token type ids: 0 for the first sequence (plus specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
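

# A minimal, hedged usage sketch of the tokenizer above. The checkpoint name
# is one of the entries in PRETRAINED_VOCAB_FILES_MAP; downloading it at
# runtime is an assumption of this example, not something this module does.
if __name__ == "__main__":
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    ids = tokenizer("Paris is the [MASK] of France.")["input_ids"]
    # Round-trip: decoding strips the SentencePiece markers and re-inserts
    # spaces, with no space before [MASK]/[SEP] (see `_decode` above).
    print(tokenizer.decode(ids))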
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)

logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features, i.e. one tokenized `InputExample`."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset

    class HansDataset(Dataset):
        """PyTorch dataset that tokenizes HANS examples and caches the features on disk."""

        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train", tokenizer.__class__.__name__, str(max_seq_length), task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf

    class TFHansDataset:
        """TensorFlow dataset that tokenizes HANS examples and wraps them in `tf.data`."""

        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))
                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list


class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Create `InputExample`s from the raw TSV rows, skipping the header row."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples


def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
) -> List[InputFeatures]:
    """Convert `InputExample`s to `InputFeatures` by tokenizing and mapping labels to ids."""
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))
        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))
    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")
    return features


hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
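

# A minimal, hedged usage sketch of the utilities above. It requires torch
# (so that HansDataset is defined), and the paths and checkpoint name are
# placeholders: the HANS files `heuristics_train_set.txt` /
# `heuristics_evaluation_set.txt` must already exist under `data_dir`.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    demo_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    dataset = HansDataset(
        data_dir="./hans_data",  # hypothetical local path
        tokenizer=demo_tokenizer,
        task="hans",
        max_seq_length=128,
        evaluate=True,
    )
    print(len(dataset), dataset.get_labels())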