"""Convert CvT checkpoints from the original repository to the Hugging Face format."""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """Return (HF key, original key) rename pairs for the patch embeddings of stage `idx`."""
    embed = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def attention(idx, cnt):
    """Return (HF key, original key) rename pairs for attention block `cnt` of stage `idx`."""
    attention_weights = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def cls_token(idx):
    """Return the (HF key, original key) rename pair for the cls token of the final stage."""
    token = []
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') )
return token
def final():
    """Return rename pairs for the final layernorm and the classification head."""
    head = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    """Fetch the ImageNet labels, build the CvT config and copy the original weights into it."""
    img_labels_file = 'imagenet-1k-id2label.json'
    num_labels = 1000
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type='dataset')), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
    # For depth size 13 (13 = 1 + 2 + 10)
    if cvt_model.rsplit('/', 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1 + 4 + 16)
    elif cvt_model.rsplit('/', 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k')
    image_processor.size['shortest_edge'] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device('cpu'))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
        help='''Path to the original CvT checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
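    # Example invocation (hypothetical local paths, shown for illustration only):
    #   python convert_cvt_original_pytorch_checkpoint_to_pytorch.py --cvt_model cvt-13 \
    #       --image_size 384 --cvt_file_name cvtmodels/CvT-13-384x384-IN-22k.pth \
    #       --pytorch_dump_folder_path ./cvt-13-384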
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]
    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences
    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be downloaded even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help=(
                "Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only"
                " if you've reviewed the code as it will execute on your local machine"
            ),
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
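
# Example CLI flow (hypothetical model name, assuming this command is registered with the
# `transformers-cli` entry point): `transformers-cli download bert-base-uncased --cache-dir ./models`
# builds a DownloadCommand via download_command_factory(args); run() then pre-fills the cache
# by calling AutoModel.from_pretrained and AutoTokenizer.from_pretrained.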
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name: str, num_meta4D_last_stage: int) -> str:
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")
            new_name = "last_stage." + trimmed_name
    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name
    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values
    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)
    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)
if "l1" in model_name:
A__ : List[str] = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :10] , lowercase_ , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
A__ : Any = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :10] , lowercase_ , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
A__ : Union[str, Any] = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
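    # Example invocation (hypothetical file names, shown for illustration only):
    #   python convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py \
    #       --pytorch_model_path efficientformer_l1_300d.pth --config_file l1_config.json \
    #       --pytorch_dump_path ./efficientformer-l1 --no-push_to_hub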
"""Swin Transformer model configuration."""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    r"""Configuration class to store the configuration of a [`SwinModel`]."""

    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
"""Tensorflow mT5 model."""
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
"""Convert and shard a large SwitchTransformers T5X checkpoint into PyTorch checkpoints."""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename a flax key tuple (and transpose its tensor where needed) to match PyTorch conventions."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor
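
# For example, a key ("encoder", "block", "0", "mlp", "wi", "kernel") paired with a 2-D tensor
# becomes ("encoder", "block", "0", "mlp", "wi", "weight") with the tensor transposed, while a
# 3-D expert kernel gets the same key rename but is permuted to (experts, out, in) instead.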
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("encoder.block", "encoder.blocks")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--switch_t5x_checkpoint_path',
        default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
        type=str,
        required=False,
        help='Path to a directory containing a folder per layer. Follows the original Google format.',
    )
    parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
    parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
        type=str,
        required=False,
        help='Path to the output pytorch model.',
    )
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
"""Utilities for loading models with bitsandbytes 8-bit and 4-bit quantization."""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)
        # add cpu modules to skip modules only for 4-bit modules
        if load_in_4bit:
            bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization. "
            "We move the model to cuda."
        )
        return model

    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )

    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
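
# Hypothetical usage sketch (argument values assumed, not taken from this file):
#   with init_empty_weights():
#       model = MyModel(config)
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#   model = load_and_quantize_model(
#       model, bnb_config, weights_location="path/to/weights", device_map="auto"
#   )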
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
def has_4bit_bnb_layers(model):
    """Check whether the model contains any `bnb.nn.Linear4bit` layers."""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False
def get_parameter_device(parameter: nn.Module):
    return next(parameter.parameters()).device
def quantize_and_offload(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
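
# Note: despite its name, cosine_distance returns a cosine *similarity* matrix of shape
# (num_image_embeds, num_concept_embeds); larger values mean an image embedding is closer
# to a concept embedding.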
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts
    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
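
    # forward and forward_onnx compute the same scores; forward loops over images and concepts
    # in Python, while forward_onnx uses batched tensor ops only, so the graph stays traceable
    # for export (e.g. to ONNX).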
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"
@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file
def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( ) -> List[Any]:
'''simple docstring'''
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] ) -> int:
'''simple docstring'''
A__ = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
http_get("https://huggingface.co" , temp_file=SCREAMING_SNAKE_CASE_ )
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Dict ) -> List[Any]:
'''simple docstring'''
A__ = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
ftp_get("ftp://huggingface.co" , temp_file=SCREAMING_SNAKE_CASE_ )
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] ) -> str:
'''simple docstring'''
A__ = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
fsspec_get("s3://huggingface.co" , temp_file=SCREAMING_SNAKE_CASE_ )
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
fsspec_head("s3://huggingface.co" )
| 68 | 1 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]:
stooge(_lowerCamelCase , 0 , len(_lowerCamelCase ) - 1 )
return arr
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
if i >= h:
return
# If first element is smaller than the last then swap them
if arr[i] > arr[h]:
snake_case_ = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
snake_case_ = (int)((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(_lowerCamelCase , _lowerCamelCase , (h - t) )
# Recursively sort last 2/3 elements
stooge(_lowerCamelCase , i + t , (_lowerCamelCase) )
# Recursively sort first 2/3 elements
stooge(_lowerCamelCase , _lowerCamelCase , (h - t) )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[Any] = input('Enter numbers separated by a comma:\n').strip()
__SCREAMING_SNAKE_CASE : int = [int(item) for item in user_input.split(',')]
print(stooge_sort(unsorted))
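# Added note: a quick by-hand check of the corrected sort above.
#   stooge_sort([2, 4, 5, 3, 1]) -> [1, 2, 3, 4, 5]
# Stooge sort is a teaching example: it runs in O(n^(log 3 / log 1.5)),
# roughly O(n^2.71), so it is even slower than the simple O(n^2) sorts.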
| 370 |
"""simple docstring"""
import math
class __A :
'''simple docstring'''
def __init__( self : List[str] , UpperCAmelCase_ : Tuple=0 ) ->Optional[int]: # a graph with Node 0,1,...,N-1
"""simple docstring"""
snake_case_ = n
snake_case_ = [
[math.inf for j in range(0 , UpperCAmelCase_ )] for i in range(0 , UpperCAmelCase_ )
] # adjacency matrix for weight
snake_case_ = [
[math.inf for j in range(0 , UpperCAmelCase_ )] for i in range(0 , UpperCAmelCase_ )
] # dp[i][j] stores minimum distance from i to j
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] ) ->Optional[int]:
"""simple docstring"""
snake_case_ = w
def lowerCAmelCase ( self : Optional[int] ) ->Any:
"""simple docstring"""
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
snake_case_ = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
return self.dp[u][v]
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[int] = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
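# Added note: for the example graph above, worked out by hand,
# show_min(1, 4) is 11 (path 1 -> 3 -> 4, cost 5 + 6) and
# show_min(0, 3) is 16 (path 0 -> 2 -> 3, cost 9 + 7).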
| 233 | 0 |
from __future__ import annotations

import math
import random
from collections.abc import Collection
from typing import overload


class Vector:
    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    """simple docstring"""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """simple docstring"""
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """simple docstring"""
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    """simple docstring"""
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)


class Matrix:
    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    """simple docstring"""
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """simple docstring"""
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
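# Added usage sketch (not part of the original module); expected values are
# worked out by hand:
if __name__ == "__main__":
    v = Vector([1, 2])
    w = Vector([3, 4])
    print(v + w)            # (4,6)
    print(v * w)            # dot product: 1*3 + 2*4 = 11
    m = Matrix([[1, 2], [3, 4]], 2, 2)
    print(m.determinant())  # 1*4 - 2*3 = -2
    print(m * v)            # (5,11)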
| 342 |
import math


class Graph:
    def __init__(self, n=0):  # a graph with nodes 0, 1, ..., n - 1
        self.n = n
        # adjacency matrix for weight
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]
        # dp[i][j] stores the minimum distance from i to j
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
| 342 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_bridgetower""": [
"""BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BridgeTowerConfig""",
"""BridgeTowerTextConfig""",
"""BridgeTowerVisionConfig""",
],
"""processing_bridgetower""": ["""BridgeTowerProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""BridgeTowerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
"""BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BridgeTowerForContrastiveLearning""",
"""BridgeTowerForImageAndTextRetrieval""",
"""BridgeTowerForMaskedLM""",
"""BridgeTowerModel""",
"""BridgeTowerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
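# Added note: `_import_structure` maps each submodule to the public names it
# exports, and `_LazyModule` defers the real imports until an attribute is
# first accessed, which keeps `import transformers` cheap.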
| 192 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 192 | 1 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # A dataset of random length, used to test iterable dataset sharding.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even_batches(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even_batches(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
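# Added note: `skip_first_batches` is the hook Accelerate uses to resume
# training mid-epoch; the wrapped dataloader silently drops the first
# `num_batches` batches so a restarted run continues where it stopped.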
| 29 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def get_resize_output_image_size(
    input_image: np.ndarray,
    output_size: Union[int, Iterable[int]],
    keep_aspect_ratio: bool,
    multiple: int,
) -> Tuple[int, int]:
    """simple docstring"""

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
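# Added worked example (by hand): resizing a 480x640 image to 384x384 with
# keep_aspect_ratio=True and multiple=32 gives scale_height = 0.8 and
# scale_width = 0.6; fitting the height sets scale_width = 0.8, so the
# output size is (384, 512), both multiples of 32.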
class DPTImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
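# Added usage sketch (the class name DPTImageProcessor is reconstructed above):
#
#   import numpy as np
#   processor = DPTImageProcessor()
#   batch = processor.preprocess(np.zeros((480, 640, 3), dtype=np.uint8))
#   print(batch["pixel_values"][0].shape)  # (3, 384, 384)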
| 235 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 363 |
'''simple docstring'''
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/vocab.json")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures")
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def lowerCAmelCase ( self : str )-> Any:
snake_case = 0
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
snake_case = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(__snake_case , __snake_case )
    def test_processor_from_local_directory_from_repo(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)

            processor = AutoProcessor.from_pretrained(tmpdirname)

            self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_extractor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))

            processor = AutoProcessor.from_pretrained(tmpdirname)

            self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in tokenizer
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

            self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_feat_extr_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in feature extractor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

            self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_model_config_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class="Wav2Vec2Processor")
            model_config.save_pretrained(tmpdirname)
            # copy relevant files
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            # create empty sample processor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write("{}")

            processor = AutoProcessor.from_pretrained(tmpdirname)

            self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_from_pretrained_dynamic_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )

        processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor", trust_remote_code=True)
        self.assertTrue(processor.special_attribute_present)
        self.assertEqual(processor.__class__.__name__, "NewProcessor")

        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True, use_fast=False
            )
            new_tokenizer = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present)
            self.assertEqual(new_tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
    def test_new_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            AutoProcessor.register(CustomConfig, CustomProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoProcessor.register(Wav2Vec2Config, Wav2Vec2Processor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir, "vocab.txt")
                with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
                tokenizer = CustomTokenizer(vocab_file)

            processor = CustomProcessor(feature_extractor, tokenizer)

            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir)
                new_processor = AutoProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_processor, CustomProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_processor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoProcessor.register(CustomConfig, NewProcessor)
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertTrue(processor.special_attribute_present)
            self.assertTrue(processor.feature_extractor.special_attribute_present)
            self.assertTrue(processor.tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_auto_processor_creates_tokenizer(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(processor.__class__.__name__, "BertTokenizerFast")

    def test_auto_processor_creates_image_processor(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext")
        self.assertEqual(processor.__class__.__name__, "ConvNextImageProcessor")
@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-processor")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token
            )

            new_processor = Wav2Vec2Processor.from_pretrained(f"{USER}/test-processor")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_in_organization(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )

            new_processor = Wav2Vec2Processor.from_pretrained("valid_org/test-processor-org")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())
def lowerCAmelCase ( self : List[str] )-> int:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
feature_extractor = CustomFeatureExtractor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
vocab_file = os.path.join(tmp_dir , """vocab.txt""" )
with open(vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
tokenizer = CustomTokenizer(vocab_file )
processor = CustomProcessor(feature_extractor , tokenizer )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f'''{USER}/test-dynamic-processor''' , token=self._token )
repo = Repository(tmp_dir , clone_from=f'''{USER}/test-dynamic-processor''' , token=self._token )
processor.save_pretrained(tmp_dir )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(tmp_dir , """tokenizer_config.json""" ) ) as f:
tokenizer_config = json.load(f )
self.assertDictEqual(
tokenizer_config["""auto_map"""] , {
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(tmp_dir , """custom_feature_extraction.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(tmp_dir , """custom_tokenization.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(tmp_dir , """custom_processing.py""" ) ) )
repo.push_to_hub()
new_processor = AutoProcessor.from_pretrained(f'''{USER}/test-dynamic-processor''' , trust_remote_code=True )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
| 3 | 0 |
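For reference, the Auto-class registration mechanism exercised by the test above can be shown in isolation. Here is a minimal, runnable sketch; the `custom` model type and `CustomConfig` class are illustrative stand-ins, not the test's actual fixtures:

```python
from transformers import AutoConfig, PretrainedConfig

# A minimal custom config; `model_type` is the key the Auto classes dispatch on.
class CustomConfig(PretrainedConfig):
    model_type = "custom"

# After registration, the Auto factory resolves "custom" to our class.
AutoConfig.register("custom", CustomConfig)
config = AutoConfig.for_model("custom")
print(type(config).__name__)  # CustomConfig
```

`AutoFeatureExtractor.register`, `AutoTokenizer.register`, and `AutoProcessor.register` follow the same config-class-to-implementation mapping that the test's `finally` block cleans up.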
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 19 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_xglm'''] = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_xglm_fast'''] = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_xglm'''] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_flax_xglm'''] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_tf_xglm'''] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 19 | 1 |
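The `_LazyModule` indirection above defers importing heavy framework-specific submodules until they are first accessed. A minimal sketch of the same idea using plain PEP 562 module-level `__getattr__` (package and class names here are hypothetical):

```python
# lazy_pkg/__init__.py -- import submodules only when one of their attributes is requested
import importlib
from typing import TYPE_CHECKING

_import_structure = {"configuration": ["MyConfig"], "modeling": ["MyModel"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

if TYPE_CHECKING:
    # static type checkers still see the real imports
    from .configuration import MyConfig
    from .modeling import MyModel
else:
    def __getattr__(name):  # PEP 562: called only for names not found normally
        if name in _attr_to_module:
            module = importlib.import_module("." + _attr_to_module[name], __name__)
            return getattr(module, name)
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```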
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def UpperCamelCase ( inductance , capacitance ):
'''Resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C)).'''
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 353 |
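A quick worked check of the LC resonance formula used above, with illustrative component values (L = 10 mH, C = 5 µF):

```python
from math import pi, sqrt

inductance = 10e-3   # henries
capacitance = 5e-6   # farads
frequency = 1 / (2 * pi * sqrt(inductance * capacitance))
print(round(frequency, 1))  # 711.8 (Hz)
```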
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size ( features ):
'''Cap the Parquet writer batch size so row groups stay small for large (image/audio/binary) features.'''
batch_size = np.inf
def set_batch_size(feature ) -> None:
nonlocal batch_size
if isinstance(feature , Image ):
batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(feature , Audio ):
batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(feature , Value ) and feature.dtype == "binary":
batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(features , set_batch_size )
return None if batch_size is np.inf else batch_size
class ParquetDatasetReader ( AbstractDatasetReader ):
def __init__( self ,path_or_paths ,split = None ,features = None ,cache_dir = None ,keep_in_memory = False ,streaming = False ,num_proc = None ,**kwargs ,):
super().__init__(
path_or_paths ,split=split ,features=features ,cache_dir=cache_dir ,keep_in_memory=keep_in_memory ,streaming=streaming ,num_proc=num_proc ,**kwargs ,)
path_or_paths = path_or_paths if isinstance(path_or_paths ,dict) else {self.split: path_or_paths}
hash = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
self.builder = Parquet(
cache_dir=cache_dir ,data_files=path_or_paths ,features=features ,hash=hash ,**kwargs ,)
def read( self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config ,download_mode=download_mode ,verification_mode=verification_mode ,base_path=base_path ,num_proc=self.num_proc ,)
dataset = self.builder.as_dataset(
split=self.split ,verification_mode=verification_mode ,in_memory=self.keep_in_memory)
return dataset
class ParquetDatasetWriter :
def __init__( self ,dataset ,path_or_buf ,batch_size = None ,**parquet_writer_kwargs ,):
self.dataset = dataset
self.path_or_buf = path_or_buf
self.batch_size = batch_size or get_writer_batch_size(dataset.features)
self.parquet_writer_kwargs = parquet_writer_kwargs
def write( self):
batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf ,(str, bytes, os.PathLike)):
with open(self.path_or_buf ,'''wb+''') as buffer:
written = self._write(file_obj=buffer ,batch_size=batch_size ,**self.parquet_writer_kwargs)
else:
written = self._write(file_obj=self.path_or_buf ,batch_size=batch_size ,**self.parquet_writer_kwargs)
return written
def _write( self ,file_obj ,batch_size ,**parquet_writer_kwargs):
written = 0
_ = parquet_writer_kwargs.pop('''path_or_buf''' ,None)
schema = self.dataset.features.arrow_schema
writer = pq.ParquetWriter(file_obj ,schema=schema ,**parquet_writer_kwargs)
for offset in logging.tqdm(
range(0 ,len(self.dataset) ,batch_size) ,unit='''ba''' ,disable=not logging.is_progress_bar_enabled() ,desc='''Creating parquet from Arrow format''' ,):
batch = query_table(
table=self.dataset._data ,key=slice(offset ,offset + batch_size) ,indices=self.dataset._indices if self.dataset._indices is not None else None ,)
writer.write_table(batch)
written += batch.nbytes
writer.close()
return written
| 97 | 0 |
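The writer above streams an Arrow table to Parquet one slice at a time. A self-contained sketch of the same pyarrow pattern on toy data (file name and batch size chosen arbitrarily):

```python
import pyarrow as pa
import pyarrow.parquet as pq

table = pa.table({"id": list(range(10)), "text": [f"row {i}" for i in range(10)]})
batch_size = 4

writer = pq.ParquetWriter("out.parquet", schema=table.schema)
for offset in range(0, table.num_rows, batch_size):
    writer.write_table(table.slice(offset, batch_size))  # each slice becomes its own row group(s)
writer.close()

print(pq.read_table("out.parquet").num_rows)  # 10
```

Capping the batch size, as `get_writer_batch_size` does for image/audio/binary features, keeps individual row groups small enough to read back without large memory spikes.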
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
MODEL_TYPE = 'bart'
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True )
def load_models ( ):
'''Load the question-retrieval (qar) and answer-generation (sas) models.'''
if LOAD_DENSE_INDEX:
qar_tokenizer = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
qar_model = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
qar_model = qar_model.eval()
else:
qar_tokenizer, qar_model = (None, None)
if MODEL_TYPE == "bart":
sas_tokenizer = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
sas_model = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
save_dict = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
sas_model = sas_model.eval()
else:
sas_tokenizer, sas_model = make_qa_sas_model(
model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True )
def load_indexes ( ):
'''Load the Wiki40b snippets and build the dense (faiss) and sparse (Elasticsearch) indexes.'''
if LOAD_DENSE_INDEX:
faiss_res = faiss.StandardGpuResources()
wikiaab_passages = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
wikiaab_passage_reps = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , )
wikiaab_index_flat = faiss.IndexFlatIP(128 )
wikiaab_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res , 1 , wikiaab_index_flat )
wikiaab_gpu_index_flat.add(wikiaab_passage_reps ) # TODO fix for larger GPU
else:
wikiaab_passages, wikiaab_gpu_index_flat = (None, None)
es_client = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True )
def load_train_data ( ):
'''Load the ELI5 training set and index its question embeddings with faiss.'''
elia = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
elia_train = elia['''train_eli5''']
elia_train_q_reps = np.memmap(
'''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) )
eli5_train_q_index = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(elia_train_q_reps )
return (elia_train, eli5_train_q_index)
wikiaab_passages , wikiaab_gpu_index_flat , es_client = load_indexes()
qar_tokenizer , qar_model , sas_tokenizer , sas_model = load_models()
elia_train , eli5_train_q_index = load_train_data()
def find_nearest_training ( question , n_results=10 ):
'''Return the ELI5 training examples whose questions are closest to the query embedding.'''
q_rep = embed_questions_for_retrieval([question] , qar_tokenizer , qar_model )
_, I = eli5_train_q_index.search(q_rep , n_results )
nn_examples = [elia_train[int(i )] for i in I[0]]
return nn_examples
def make_support ( question , source="wiki40b" , method="dense" , n_results=10 ):
'''Retrieve supporting passages for the question and build the seq2seq input document.'''
if source == "none":
support_doc, hit_lst = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
support_doc, hit_lst = query_qa_dense_index(
question , qar_model , qar_tokenizer , wikiaab_passages , wikiaab_gpu_index_flat , n_results )
else:
support_doc, hit_lst = query_es_index(
question , es_client , index_name='''english_wiki40b_snippets_100w''' , n_results=n_results , )
support_list = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
question_doc = '''question: {} context: {}'''.format(question , support_doc )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda snake_case__ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda snake_case__ : None),
} )
def answer_question ( question_doc , sas_model , sas_tokenizer , min_len=64 , max_len=256 , sampling=False , n_beams=2 , top_p=0.95 , temp=0.8 ):
'''Generate a long-form answer for the retrieved question document.'''
with torch.no_grad():
answer = qa_sas_generate(
question_doc , sas_model , sas_tokenizer , num_answers=1 , num_beams=n_beams , min_len=min_len , max_len=max_len , do_sample=sampling , temp=temp , top_p=top_p , top_k=None , max_input_length=1024 , device='''cuda:0''' , )[0]
return (answer, support_list) # support_list is read from module scope at call time, as in the original app
st.title('Long Form Question Answering with ELI5')
# Start sidebar
header_html = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
header_full = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
demo_options = st.sidebar.checkbox('Demo options')
if demo_options:
action_st = st.sidebar.selectbox(
'',
action_list,
index=3,
)
action = action_list.index(action_st)
show_type = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
show_passages = show_type == 'Show full text of passages'
else:
action = 3
show_passages = True
retrieval_options = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
retriever_info = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n '
st.sidebar.markdown(retriever_info)
wiki_source = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
index_type = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
wiki_source = 'wiki40b'
index_type = 'dense'
sampled = 'beam'
n_beams = 2
min_len = 64
max_len = 2_56
top_p = None
temp = None
generate_options = st.sidebar.checkbox('Generation options')
if generate_options:
generate_info = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
sampled = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
min_len = st.sidebar.slider(
'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
max_len = st.sidebar.slider(
'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
n_beams = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
top_p = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
temp = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
n_beams = None
# start main text
questions_list = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
question_s = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
question = st.text_input('Enter your question here:', '')
else:
question = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
question_doc, support_list_dense = make_support(question, source=wiki_source, method='dense', n_results=10)
question_doc, support_list_sparse = make_support(question, source=wiki_source, method='sparse', n_results=10)
support_list = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
support_list = support_list[:10]
question_doc = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
wiki_url = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
sec_titles = res[1].strip()
if sec_titles == "":
sections = '[{}]({})'.format(res[0], wiki_url)
else:
sec_list = sec_titles.split(' & ')
sections = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style=\"font-family:arial; font-size:10pt;\">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
nn_train_list = find_nearest_training(question)
train_exple = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
answers_st = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
disclaimer = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
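The dense retrieval path in the app reduces to max-inner-product search with faiss. A tiny self-contained sketch, with random vectors standing in for real passage embeddings:

```python
import faiss
import numpy as np

d = 128                                    # embedding dimension, as in the app above
passages = np.random.rand(1000, d).astype("float32")
index = faiss.IndexFlatIP(d)               # exact max-inner-product search
index.add(passages)

query = np.random.rand(1, d).astype("float32")
scores, ids = index.search(query, 10)      # top-10 passage indices by inner product
print(ids[0])
```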
| 3 | from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase ):
__lowerCAmelCase = """swin"""
__lowerCAmelCase = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : Any , lowerCamelCase_ : Optional[int]=224 , lowerCamelCase_ : Union[str, Any]=4 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Optional[Any]=96 , lowerCamelCase_ : int=[2, 2, 6, 2] , lowerCamelCase_ : Dict=[3, 6, 12, 24] , lowerCamelCase_ : str=7 , lowerCamelCase_ : Tuple=4.0 , lowerCamelCase_ : str=True , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : Any="gelu" , lowerCamelCase_ : Optional[int]=False , lowerCamelCase_ : Optional[Any]=0.0_2 , lowerCamelCase_ : str=1E-5 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : str=None , lowerCamelCase_ : Any=None , **lowerCamelCase_ : Optional[int] , ):
"""simple docstring"""
super().__init__(**lowerCamelCase_ )
UpperCamelCase = image_size
UpperCamelCase = patch_size
UpperCamelCase = num_channels
UpperCamelCase = embed_dim
UpperCamelCase = depths
UpperCamelCase = len(lowerCamelCase_ )
UpperCamelCase = num_heads
UpperCamelCase = window_size
UpperCamelCase = mlp_ratio
UpperCamelCase = qkv_bias
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = drop_path_rate
UpperCamelCase = hidden_act
UpperCamelCase = use_absolute_embeddings
UpperCamelCase = layer_norm_eps
UpperCamelCase = initializer_range
UpperCamelCase = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCamelCase = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) )
UpperCamelCase = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase_ ) + 1 )]
UpperCamelCase , UpperCamelCase = get_aligned_output_features_output_indices(
out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names )
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
__lowerCAmelCase = version.parse("""1.11""" )
@property
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
return 1E-4
| 343 | 0 |
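A small check of the channel-dimension arithmetic in the Swin config above: the hidden size after the last stage is `embed_dim * 2 ** (len(depths) - 1)`, so the defaults give 96 * 8 = 768.

```python
embed_dim = 96
depths = [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
print(hidden_size)  # 768
```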
def __lowerCamelCase (number_of_steps : int ):
"""Count the distinct ways to climb a staircase taking 1 or 2 steps at a time (a Fibonacci recurrence)."""
assert (
isinstance(number_of_steps , int ) and number_of_steps > 0
), F"number_of_steps needs to be positive integer, your input {number_of_steps}"
if number_of_steps == 1:
return 1
previous , current = 1, 1
for _ in range(number_of_steps - 1 ):
current , previous = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
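The loop above is the Fibonacci recurrence ways(n) = ways(n-1) + ways(n-2). For 4 steps there are 5 orderings (1+1+1+1, 1+1+2, 1+2+1, 2+1+1, 2+2), which the iteration reproduces:

```python
previous, current = 1, 1
for _ in range(4 - 1):
    current, previous = current + previous, current
print(current)  # 5
```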
| 206 | import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/unispeech-sat-base-100h-libri-ft''': (
'''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'''
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class lowercase ( a ):
lowercase__ : Tuple = """unispeech-sat"""
def __init__( self : str , _UpperCamelCase : Tuple=32 , _UpperCamelCase : Union[str, Any]=768 , _UpperCamelCase : Tuple=12 , _UpperCamelCase : List[str]=12 , _UpperCamelCase : Tuple=3_072 , _UpperCamelCase : List[str]="gelu" , _UpperCamelCase : Tuple=0.1 , _UpperCamelCase : Any=0.1 , _UpperCamelCase : Union[str, Any]=0.1 , _UpperCamelCase : Dict=0.0 , _UpperCamelCase : Tuple=0.0 , _UpperCamelCase : Tuple=0.1 , _UpperCamelCase : Optional[Any]=0.1 , _UpperCamelCase : Tuple=0.0_2 , _UpperCamelCase : Optional[int]=1e-5 , _UpperCamelCase : Union[str, Any]="group" , _UpperCamelCase : Optional[int]="gelu" , _UpperCamelCase : Tuple=(512, 512, 512, 512, 512, 512, 512) , _UpperCamelCase : List[str]=(5, 2, 2, 2, 2, 2, 2) , _UpperCamelCase : Optional[int]=(10, 3, 3, 3, 3, 2, 2) , _UpperCamelCase : Optional[int]=False , _UpperCamelCase : Dict=128 , _UpperCamelCase : Optional[int]=16 , _UpperCamelCase : Tuple=False , _UpperCamelCase : Union[str, Any]=True , _UpperCamelCase : Optional[Any]=0.0_5 , _UpperCamelCase : Union[str, Any]=10 , _UpperCamelCase : Union[str, Any]=2 , _UpperCamelCase : str=0.0 , _UpperCamelCase : List[Any]=10 , _UpperCamelCase : Optional[int]=0 , _UpperCamelCase : Any=320 , _UpperCamelCase : List[Any]=2 , _UpperCamelCase : str=0.1 , _UpperCamelCase : str=100 , _UpperCamelCase : int=256 , _UpperCamelCase : Optional[Any]=256 , _UpperCamelCase : List[Any]=0.1 , _UpperCamelCase : str="mean" , _UpperCamelCase : int=False , _UpperCamelCase : Optional[Any]=False , _UpperCamelCase : Any=256 , _UpperCamelCase : str=(512, 512, 512, 512, 1_500) , _UpperCamelCase : List[Any]=(5, 3, 3, 1, 1) , _UpperCamelCase : Union[str, Any]=(1, 2, 3, 1, 1) , _UpperCamelCase : Any=512 , _UpperCamelCase : str=0 , _UpperCamelCase : int=1 , _UpperCamelCase : Any=2 , _UpperCamelCase : Optional[Any]=504 , **_UpperCamelCase : str , ) -> int:
'''simple docstring'''
super().__init__(**_UpperCamelCase , pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase )
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = feat_extract_norm
SCREAMING_SNAKE_CASE = feat_extract_activation
SCREAMING_SNAKE_CASE = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE = conv_bias
SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE = len(self.conv_dim )
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_dropout
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = activation_dropout
SCREAMING_SNAKE_CASE = feat_proj_dropout
SCREAMING_SNAKE_CASE = final_dropout
SCREAMING_SNAKE_CASE = layerdrop
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = num_clusters
SCREAMING_SNAKE_CASE = do_stable_layer_norm
SCREAMING_SNAKE_CASE = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE = apply_spec_augment
SCREAMING_SNAKE_CASE = mask_time_prob
SCREAMING_SNAKE_CASE = mask_time_length
SCREAMING_SNAKE_CASE = mask_time_min_masks
SCREAMING_SNAKE_CASE = mask_feature_prob
SCREAMING_SNAKE_CASE = mask_feature_length
SCREAMING_SNAKE_CASE = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
SCREAMING_SNAKE_CASE = num_codevectors_per_group
SCREAMING_SNAKE_CASE = num_codevector_groups
SCREAMING_SNAKE_CASE = contrastive_logits_temperature
SCREAMING_SNAKE_CASE = feat_quantizer_dropout
SCREAMING_SNAKE_CASE = num_negatives
SCREAMING_SNAKE_CASE = codevector_dim
SCREAMING_SNAKE_CASE = proj_codevector_dim
SCREAMING_SNAKE_CASE = diversity_loss_weight
# ctc loss
SCREAMING_SNAKE_CASE = ctc_loss_reduction
SCREAMING_SNAKE_CASE = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE = xvector_output_dim
@property
def __snake_case( self : Tuple ) -> str:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 206 | 1 |
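The final property of the config above multiplies the conv strides to get the feature extractor's downsampling factor. With the default strides this is 320, i.e. one feature frame per 320 waveform samples (20 ms at 16 kHz):

```python
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
print(functools.reduce(operator.mul, conv_stride, 1))  # 320
```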
from __future__ import annotations
def lowerCAmelCase__ ( matrix: list[list[int]] ) -> int:
'''Minimum path sum from top-left to bottom-right, moving only right or down (in-place DP).'''
# preprocessing the first row
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(matrix ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(matrix ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 329 |
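A worked example of the in-place DP above on a 3x3 grid: the cheapest right/down path is 1 -> 3 -> 1 -> 1 -> 1 with total cost 7.

```python
matrix = [
    [1, 3, 1],
    [1, 5, 1],
    [4, 2, 1],
]
for i in range(1, len(matrix[0])):
    matrix[0][i] += matrix[0][i - 1]
for i in range(1, len(matrix)):
    matrix[i][0] += matrix[i - 1][0]
for i in range(1, len(matrix)):
    for j in range(1, len(matrix[0])):
        matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
print(matrix[-1][-1])  # 7
```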
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 1_0
NUM_PERM = 2_5_6
def get_min_hash ( tokens : List[str] ) -> Optional[MinHash]:
if len(tokens ) < MIN_NUM_TOKENS:
return None
min_hash = MinHash(num_perm=NUM_PERM )
for token in set(tokens ):
min_hash.update(token.encode() )
return min_hash
def get_tokens ( code : str ) -> Set[str]:
return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class DuplicationIndex :
'''Index code snippets by MinHash and group near-duplicates into clusters.'''
def __init__( self , *,
duplication_jaccard_threshold = 0.85 , ):
self._duplication_jaccard_threshold = duplication_jaccard_threshold
self._num_perm = NUM_PERM
self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
self._duplicate_clusters = defaultdict(set )
def add ( self , code_key , min_hash ):
close_duplicates = self._index.query(min_hash )
if code_key in self._index.keys:
print(f'''Duplicate key {code_key}''' )
return
self._index.insert(code_key , min_hash )
if len(close_duplicates ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(code_key )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(code_key )
def get_duplicate_clusters ( self ):
duplicate_clusters = []
for base, duplicates in self._duplicate_clusters.items():
cluster = [base] + list(duplicates )
# reformat the cluster to be a list of dict
cluster = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
duplicate_clusters.append(cluster )
return duplicate_clusters
def save ( self , filepath ):
duplicate_clusters = self.get_duplicate_clusters()
with open(filepath , '''w''' ) as f:
json.dump(duplicate_clusters , f )
def _compute_min_hash ( element ):
index, data = element
min_hash = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter ( dataset_iterator : Type[Dataset] ):
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=10000 ) , chunksize=100 , ):
if data is not None:
yield data
def make_duplicate_clusters ( dataset_iterator : Type[Dataset] , jaccard_threshold : float ):
di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) , max_queue_size=100 ) ):
di.add(filename , min_hash )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def jaccard_similarity ( code1 : str , code2 : str ) -> float:
tokens1 = get_tokens(code1 )
tokens2 = get_tokens(code2 )
return len(tokens1 & tokens2 ) / len(tokens1 | tokens2 )
_shared_dataset = None
def _find_cluster_extremes_shared ( cluster , jaccard_threshold ):
extremes = []
for element1 in cluster:
code1 = _shared_dataset[element1['''base_index''']]['''content''']
for element2 in extremes:
code2 = _shared_dataset[element2['''base_index''']]['''content''']
if jaccard_similarity(code1 , code2 ) >= jaccard_threshold:
element2["copies"] += 1
break
else:
element1["copies"] = 1
extremes.append(element1 )
return extremes
def find_extremes ( cluster_list , dataset , jaccard_threshold ):
global _shared_dataset
_shared_dataset = dataset
extremes_list = []
f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
f , cluster_list , ) , total=len(cluster_list ) , ):
extremes_list.append(extremes )
return extremes_list
def deduplicate_dataset ( dataset : Type[Dataset] , jaccard_threshold : float = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
duplicate_indices = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
extreme_dict = {}
extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
for extremes in extremes_clusters:
for element in extremes:
extreme_dict[element['''base_index''']] = element
remove_indices = duplicate_indices - set(extreme_dict.keys() )
ds_filter = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
element['''is_extreme'''] = element['''base_index'''] in extreme_dict
if element["is_extreme"]:
element['''copies'''] = extreme_dict[element['''base_index''']]['''copies''']
print(f'''Original dataset size: {len(dataset )}''' )
print(f'''Number of duplicate clusters: {len(duplicate_clusters )}''' )
print(f'''Files in duplicate cluster: {len(duplicate_indices )}''' )
print(f'''Unique files in duplicate cluster: {len(extreme_dict )}''' )
print(f'''Filtered dataset size: {len(ds_filter )}''' )
return ds_filter, duplicate_clusters
| 45 | 0 |
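A minimal demonstration of the MinHash estimate the deduplication code relies on, using the datasketch API directly (the token lists are toy data):

```python
from datasketch import MinHash

def signature(tokens, num_perm=256):
    m = MinHash(num_perm=num_perm)
    for t in set(tokens):
        m.update(t.encode())
    return m

a = signature("def add ( a , b ) : return a + b".split())
b = signature("def add ( x , y ) : return x + y".split())
print(round(a.jaccard(b), 2))  # estimated Jaccard similarity of the two token sets
```

`MinHashLSH` builds on the same signatures to retrieve, in roughly constant time, previously indexed keys whose estimated similarity exceeds the threshold.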
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-50-one-to-many-mmt": (
"https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/mbart-large-50-one-to-many-mmt": 1024,
}
# fmt: off
lowercase__ = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class A_ ( PreTrainedTokenizer ):
'''simple docstring'''
UpperCAmelCase_ : Dict = VOCAB_FILES_NAMES
UpperCAmelCase_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : Any = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : Tuple = ["""input_ids""", """attention_mask"""]
UpperCAmelCase_ : List[int] = []
UpperCAmelCase_ : List[int] = []
def __init__( self : Any , lowercase_ : Tuple , lowercase_ : Dict=None , lowercase_ : Tuple=None , lowercase_ : Tuple="</s>" , lowercase_ : Any="</s>" , lowercase_ : Dict="<s>" , lowercase_ : str="<unk>" , lowercase_ : Optional[Any]="<pad>" , lowercase_ : List[Any]="<mask>" , lowercase_ : Optional[Dict[str, Any]] = None , **lowercase_ : Optional[Any] , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase : Optional[Any] = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token
UpperCAmelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCAmelCase : int = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowercase_ , tgt_lang=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
UpperCAmelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowercase_ ) )
UpperCAmelCase : Tuple = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCAmelCase : List[Any] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase : List[str] = 1
UpperCAmelCase : str = len(self.sp_model )
UpperCAmelCase : List[Any] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowercase_ )
}
UpperCAmelCase : Union[str, Any] = {v: k for k, v in self.lang_code_to_id.items()}
UpperCAmelCase : Optional[Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCAmelCase : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCAmelCase : Union[str, Any] = src_lang if src_lang is not None else 'en_XX'
UpperCAmelCase : Any = self.lang_code_to_id[self._src_lang]
UpperCAmelCase : Dict = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def UpperCAmelCase_ ( self : Any ) -> str:
return self._src_lang
@src_lang.setter
def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : str ) -> None:
UpperCAmelCase : List[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Union[str, Any] ) -> Dict:
UpperCAmelCase : Optional[int] = self.__dict__.copy()
UpperCAmelCase : List[str] = None
return state
def __setstate__( self : Any , lowercase_ : Dict ) -> None:
UpperCAmelCase : List[str] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCAmelCase : Tuple = {}
UpperCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase_ ( self : List[str] ) -> Dict:
UpperCAmelCase : int = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase_ ( self : str , lowercase_ : str ) -> List[str]:
return self.sp_model.encode(lowercase_ , out_type=lowercase_ )
def UpperCAmelCase_ ( self : int , lowercase_ : str ) -> int:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase : Tuple = self.sp_model.PieceToId(lowercase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : int ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : int ) -> Optional[int]:
UpperCAmelCase : List[Any] = []
UpperCAmelCase : Dict = ''
UpperCAmelCase : Union[str, Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowercase_ ) + token
UpperCAmelCase : List[str] = True
UpperCAmelCase : int = []
else:
current_sub_tokens.append(lowercase_ )
UpperCAmelCase : List[Any] = False
out_string += self.sp_model.decode(lowercase_ )
return out_string.strip()
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : str , lowercase_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowercase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase : Optional[Any] = os.path.join(
lowercase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase_ , 'wb' ) as fi:
UpperCAmelCase : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (out_vocab_file,)
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ )
UpperCAmelCase : Union[str, Any] = [1] * len(self.prefix_tokens )
UpperCAmelCase : int = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowercase_ )) + suffix_ones
return prefix_ones + ([0] * len(lowercase_ )) + ([0] * len(lowercase_ )) + suffix_ones
def UpperCAmelCase_ ( self : List[str] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : Optional[str] , lowercase_ : Optional[str] , **lowercase_ : Optional[Any] ) -> Optional[int]:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
UpperCAmelCase : List[Any] = src_lang
UpperCAmelCase : int = self(lowercase_ , add_special_tokens=lowercase_ , return_tensors=lowercase_ , **lowercase_ )
UpperCAmelCase : Any = self.convert_tokens_to_ids(lowercase_ )
UpperCAmelCase : Dict = tgt_lang_id
return inputs
def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : str = "en_XX" , lowercase_ : Optional[List[str]] = None , lowercase_ : str = "ro_RO" , **lowercase_ : Dict , ) -> BatchEncoding:
UpperCAmelCase : Optional[int] = src_lang
UpperCAmelCase : Optional[Any] = tgt_lang
return super().prepare_seqaseq_batch(lowercase_ , lowercase_ , **lowercase_ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCAmelCase_ ( self : Any , lowercase_ : str ) -> None:
UpperCAmelCase : Union[str, Any] = self.lang_code_to_id[src_lang]
UpperCAmelCase : Optional[Any] = [self.cur_lang_code_id]
UpperCAmelCase : Dict = [self.eos_token_id]
def UpperCAmelCase_ ( self : Tuple , lowercase_ : str ) -> None:
UpperCAmelCase : List[str] = self.lang_code_to_id[tgt_lang]
UpperCAmelCase : List[str] = [self.cur_lang_code_id]
UpperCAmelCase : str = [self.eos_token_id]
| 366 |
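The tokenizer above wraps a SentencePiece model; a minimal sketch of the underlying API (the model file path is a placeholder for a real `sentencepiece.bpe.model`):

```python
import sentencepiece as spm

sp = spm.SentencePieceProcessor(model_file="sentencepiece.bpe.model")  # placeholder path
pieces = sp.encode("Hello world", out_type=str)  # subword pieces, e.g. ['▁Hello', '▁world']
ids = sp.encode("Hello world", out_type=int)     # corresponding integer ids
print(pieces, ids)
print(sp.decode(ids))                            # round-trips back to the input text
```

The fairseq offset logic in the class exists only to realign these raw SentencePiece ids with the original fairseq vocabulary order.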
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowercase__ = TypeVar("KEY")
lowercase__ = TypeVar("VAL")
@dataclass(frozen=_snake_case , slots=_snake_case )
class A_ ( Generic[KEY, VAL] ):
'''simple docstring'''
UpperCAmelCase_ : KEY
UpperCAmelCase_ : VAL
class A_ ( _Item ):
'''simple docstring'''
def __init__( self : Any ) -> None:
super().__init__(lowercase_ , lowercase_ )
def __bool__( self : List[str] ) -> bool:
return False
lowercase__ = _DeletedItem()
class HashMap ( MutableMapping[KEY, VAL] ):
'''Open-addressing hash map with linear probing and tombstone deletion.'''
def __init__( self , initial_block_size : int = 8 , capacity_factor : float = 0.75 ) -> None:
self._initial_block_size = initial_block_size
self._buckets : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
self._capacity_factor = capacity_factor
self._len = 0
def _get_bucket_index ( self , key : KEY ) -> int:
return hash(key ) % len(self._buckets )
def _get_next_ind ( self , ind : int ) -> int:
return (ind + 1) % len(self._buckets )
def _try_set ( self , ind : int , key : KEY , val : VAL ) -> bool:
stored = self._buckets[ind]
if not stored:
self._buckets[ind] = _Item(key , val )
self._len += 1
return True
elif stored.key == key:
self._buckets[ind] = _Item(key , val )
return True
else:
return False
def _is_full ( self ) -> bool:
limit = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(limit )
def _is_sparse ( self ) -> bool:
if len(self._buckets ) <= self._initial_block_size:
return False
limit = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _resize ( self , new_size : int ) -> None:
old_buckets = self._buckets
self._buckets = [None] * new_size
self._len = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def _size_up ( self ) -> None:
self._resize(len(self._buckets ) * 2 )
def _size_down ( self ) -> None:
self._resize(len(self._buckets ) // 2 )
def _iterate_buckets ( self , key : KEY ) -> Iterator[int]:
ind = self._get_bucket_index(key )
for _ in range(len(self._buckets ) ):
yield ind
ind = self._get_next_ind(ind )
def _add_item ( self , key : KEY , val : VAL ) -> None:
for ind in self._iterate_buckets(key ):
if self._try_set(ind , key , val ):
break
def __setitem__( self , key : KEY , val : VAL ) -> None:
if self._is_full():
self._size_up()
self._add_item(key , val )
def __delitem__( self , key : KEY ) -> None:
for ind in self._iterate_buckets(key ):
item = self._buckets[ind]
if item is None:
raise KeyError(key )
if item is _deleted:
continue
if item.key == key:
self._buckets[ind] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , key : KEY ) -> VAL:
for ind in self._iterate_buckets(key ):
item = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(key )
def __len__( self ) -> int:
return self._len
def __iter__( self ) -> Iterator[KEY]:
yield from (item.key for item in self._buckets if item)
def __repr__( self ) -> str:
val_string = ' ,'.join(
f'''{item.key}: {item.val}''' for item in self._buckets if item )
return f'''HashMap({val_string})'''
| 280 | 0 |
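Assuming the class names restored above (`HashMap`, `_deleted`), a quick usage sketch showing the resize and tombstone behaviour:

```python
hm = HashMap(initial_block_size=8)
for i in range(7):
    hm[f"key{i}"] = i        # the 7th insert hits the 0.75 load limit and doubles the table first
print(len(hm), hm["key3"])   # 7 3
del hm["key3"]               # slot becomes a tombstone, not None, so later probes continue past it
print("key3" in hm)          # False
```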
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class A_ :
def __init__( self : str , snake_case_ : int , snake_case_ : Union[str, Any]=2 , snake_case_ : List[Any]=True , snake_case_ : str=False , snake_case_ : str=1_0 , snake_case_ : str=3 , snake_case_ : Dict=3_2 * 4 , snake_case_ : Any=3_2 * 6 , snake_case_ : Optional[Any]=4 , snake_case_ : Optional[int]=3_2 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = is_training
_UpperCAmelCase = use_auxiliary_loss
_UpperCAmelCase = num_queries
_UpperCAmelCase = num_channels
_UpperCAmelCase = min_size
_UpperCAmelCase = max_size
_UpperCAmelCase = num_labels
_UpperCAmelCase = mask_feature_size
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
snake_case_ )
_UpperCAmelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=snake_case_ )
_UpperCAmelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=snake_case_ ) > 0.5
).float()
_UpperCAmelCase = (torch.rand((self.batch_size, self.num_labels) , device=snake_case_ ) > 0.5).long()
_UpperCAmelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase ( self : List[Any] ):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def lowercase ( self : Optional[Any] ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def lowercase ( self : List[Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] ):
_UpperCAmelCase = output.encoder_hidden_states
_UpperCAmelCase = output.pixel_decoder_hidden_states
_UpperCAmelCase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(snake_case_ ) , config.decoder_config.decoder_layers )
def lowercase ( self : Tuple , snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Optional[Any]=False ):
with torch.no_grad():
_UpperCAmelCase = MaskFormerModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(pixel_values=snake_case_ , pixel_mask=snake_case_ )
_UpperCAmelCase = model(snake_case_ , output_hidden_states=snake_case_ )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(snake_case_ , snake_case_ )
def lowercase ( self : Any , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : int , snake_case_ : str , snake_case_ : List[Any] ):
_UpperCAmelCase = MaskFormerForInstanceSegmentation(config=snake_case_ )
model.to(snake_case_ )
model.eval()
def comm_check_on_output(snake_case_ : int ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_UpperCAmelCase = model(pixel_values=snake_case_ , pixel_mask=snake_case_ )
_UpperCAmelCase = model(snake_case_ )
comm_check_on_output(snake_case_ )
_UpperCAmelCase = model(
pixel_values=snake_case_ , pixel_mask=snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ )
comm_check_on_output(snake_case_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class A_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_lowerCamelCase : Dict = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
_lowerCamelCase : Tuple = (
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
_lowerCamelCase : Optional[Any] = False
_lowerCamelCase : Dict = False
_lowerCamelCase : Any = False
_lowerCamelCase : List[Any] = False
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = MaskFormerModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def lowercase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ )
def lowercase ( self : int ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*snake_case_ )
@unittest.skip(reason="MaskFormer does not use inputs_embeds" )
def lowercase ( self : Any ):
pass
@unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
def lowercase ( self : List[str] ):
pass
@unittest.skip(reason="MaskFormer is not a generative model" )
def lowercase ( self : List[str] ):
pass
@unittest.skip(reason="MaskFormer does not use token embeddings" )
def lowercase ( self : List[Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowercase ( self : Any ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase ( self : Union[str, Any] ):
pass
def lowercase ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case_ )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case_ )
@slow
def lowercase ( self : Optional[int] ):
for model_name in ["facebook/maskformer-swin-small-coco"]:
_UpperCAmelCase = MaskFormerModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = (self.model_tester.min_size,) * 2
_UpperCAmelCase = {
"pixel_values": torch.randn((2, 3, *size) , device=snake_case_ ),
"mask_labels": torch.randn((2, 1_0, *size) , device=snake_case_ ),
"class_labels": torch.zeros(2 , 1_0 , device=snake_case_ ).long(),
}
_UpperCAmelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(snake_case_ )
_UpperCAmelCase = model(**snake_case_ )
self.assertTrue(outputs.loss is not None )
def lowercase ( self : Dict ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ )
def lowercase ( self : Any ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case_ ).to(snake_case_ )
_UpperCAmelCase = model(**snake_case_ , output_attentions=snake_case_ )
self.assertTrue(outputs.attentions is not None )
def lowercase ( self : int ):
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_UpperCAmelCase = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.train()
_UpperCAmelCase = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ).loss
loss.backward()
def lowercase ( self : int ):
# only MaskFormerForInstanceSegmentation has the loss
_UpperCAmelCase = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.train()
_UpperCAmelCase = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ )
_UpperCAmelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_UpperCAmelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
_UpperCAmelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_UpperCAmelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=snake_case_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__SCREAMING_SNAKE_CASE :Dict = 1e-4
def UpperCAmelCase_ ( ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class A_ ( unittest.TestCase ):
@cached_property
def lowercase ( self : Dict ):
return (
MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
if is_vision_available()
else None
)
def lowercase ( self : List[Any] ):
_UpperCAmelCase = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(snake_case_ )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ )
_UpperCAmelCase = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(snake_case_ , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_UpperCAmelCase = model(**snake_case_ )
_UpperCAmelCase = torch.tensor(
[[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
_UpperCAmelCase = torch.tensor(
[[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
_UpperCAmelCase = torch.tensor(
[[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def lowercase ( self : Tuple ):
_UpperCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(snake_case_ )
.eval()
)
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ )
_UpperCAmelCase = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(snake_case_ , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_UpperCAmelCase = model(**snake_case_ )
# masks_queries_logits
_UpperCAmelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_UpperCAmelCase = [
[-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
[-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
[-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
]
_UpperCAmelCase = torch.tensor(snake_case_ ).to(snake_case_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
# class_queries_logits
_UpperCAmelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_UpperCAmelCase = torch.tensor(
[
[1.6_512e00, -5.2_572e00, -3.3_519e00],
[3.6_169e-02, -5.9_025e00, -2.9_313e00],
[1.0_766e-04, -7.7_630e00, -5.1_263e00],
] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def lowercase ( self : int ):
_UpperCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
.to(snake_case_ )
.eval()
)
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ )
_UpperCAmelCase = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(snake_case_ , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_UpperCAmelCase = model(**snake_case_ )
# masks_queries_logits
_UpperCAmelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_UpperCAmelCase = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
_UpperCAmelCase = torch.tensor(snake_case_ ).to(snake_case_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
# class_queries_logits
_UpperCAmelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_UpperCAmelCase = torch.tensor(
[[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def lowercase ( self : List[Any] ):
_UpperCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(snake_case_ )
.eval()
)
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors="pt" , )
_UpperCAmelCase = inputs["pixel_values"].to(snake_case_ )
_UpperCAmelCase = [el.to(snake_case_ ) for el in inputs["mask_labels"]]
_UpperCAmelCase = [el.to(snake_case_ ) for el in inputs["class_labels"]]
with torch.no_grad():
_UpperCAmelCase = model(**snake_case_ )
self.assertTrue(outputs.loss is not None )
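# Note: the @slow integration tests above only run when slow tests are enabled,
# e.g. (the test path follows the usual transformers layout and is an assumption):
#   RUN_SLOW=1 pytest tests/models/maskformer/test_modeling_maskformer.py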
| 22 |
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = False
while is_sorted is False: # Until all the indices are traversed keep looping
__lowerCAmelCase = True
for i in range(0 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
__lowerCAmelCase , __lowerCAmelCase = input_list[i + 1], input_list[i]
# swapping if elements not in order
__lowerCAmelCase = False
for i in range(1 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
__lowerCAmelCase , __lowerCAmelCase = input_list[i + 1], input_list[i]
# swapping if elements not in order
__lowerCAmelCase = False
return input_list
if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]  # inputting elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
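# Quick sanity check (sample values are illustrative, not part of the script):
#   odd_even_sort([5, 3, 1, 4, 2]) -> [1, 2, 3, 4, 5]
#   odd_even_sort([])              -> []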
| 57 | 0 |
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = {}
lowercase = tokenizer(example['''content'''] , truncation=lowerCAmelCase__ )['''input_ids''']
lowercase = len(example['''content'''] ) / len(output['''input_ids'''] )
return output
lowercase__ :Dict = HfArgumentParser(PretokenizationArguments)
lowercase__ :Optional[Any] = parser.parse_args()
if args.num_workers is None:
lowercase__ :List[Any] = multiprocessing.cpu_count()
lowercase__ :List[str] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
lowercase__ :int = time.time()
lowercase__ :List[str] = load_dataset(args.dataset_name, split="train")
print(F'Dataset loaded in {time.time()-t_start:.2f}s')
lowercase__ :Tuple = time.time()
lowercase__ :List[Any] = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(F'Dataset tokenized in {time.time()-t_start:.2f}s')
lowercase__ :List[str] = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'Data pushed to the hub in {time.time()-t_start:.2f}s')
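# Example launch (the tokenizer/dataset/repo ids below are placeholders):
#   python pretokenizing.py --tokenizer_dir codeparrot/codeparrot \
#       --dataset_name codeparrot/codeparrot-clean \
#       --tokenized_data_repo <user>/codeparrot-tokenized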
| 371 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase__ :Dict = logging.get_logger(__name__)
lowercase__ :Optional[Any] = "▁"
lowercase__ :str = {"vocab_file": "prophetnet.tokenizer"}
lowercase__ :List[str] = {
"vocab_file": {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
),
}
}
lowercase__ :Optional[Any] = {
"microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}
lowercase__ :List[Any] = {
"microsoft/xprophetnet-large-wiki100-cased": 512,
}
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = collections.OrderedDict()
with open(lowerCAmelCase__ , '''r''' , encoding='''utf-8''' ) as reader:
lowercase = reader.readlines()
for index, token in enumerate(lowerCAmelCase__ ):
lowercase = token.rstrip('''\n''' )
lowercase = index
return vocab
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : Optional[Any] =VOCAB_FILES_NAMES
lowercase_ : Any =PRETRAINED_VOCAB_FILES_MAP
lowercase_ : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : str =['''input_ids''', '''attention_mask''']
def __init__( self ,A__ ,A__="[SEP]" ,A__="[SEP]" ,A__="[SEP]" ,A__="[UNK]" ,A__="[PAD]" ,A__="[CLS]" ,A__="[MASK]" ,A__ = None ,**A__ ,):
lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A__ ,eos_token=A__ ,sep_token=A__ ,unk_token=A__ ,pad_token=A__ ,cls_token=A__ ,mask_token=A__ ,sp_model_kwargs=self.sp_model_kwargs ,**A__ ,)
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'''You need to install SentencePiece to use XLMProphetNetTokenizer: https://github.com/google/sentencepiece'''
''' pip install sentencepiece''')
raise
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(A__))
lowercase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
lowercase = {'''[PAD]''': 0, '''[CLS]''': 1, '''[SEP]''': 2, '''[UNK]''': 3, '''[MASK]''': 4}
for i in range(1_0):
lowercase = f'[unused{i}]'
lowercase = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
lowercase = 1_2
lowercase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(A__)
def __getstate__( self):
lowercase = self.__dict__.copy()
lowercase = None
return state
def __setstate__( self ,A__):
lowercase = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'''You need to install SentencePiece to use XLMProphetNetTokenizer: https://github.com/google/sentencepiece'''
''' pip install sentencepiece''')
raise
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs'''):
lowercase = {}
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def A__ ( self ,A__ ,A__ = None ,A__ = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A__ ,token_ids_a=A__ ,already_has_special_tokens=A__)
if token_ids_a is None:
return ([0] * len(A__)) + [1]
return ([0] * len(A__)) + [1] + ([0] * len(A__)) + [1]
def A__ ( self ,A__ ,A__ = None):
lowercase = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def A__ ( self):
return len(self.sp_model) + self.fairseq_offset
def A__ ( self):
lowercase = {self.convert_ids_to_tokens(A__): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def A__ ( self ,A__):
return self.sp_model.encode(A__ ,out_type=A__)
def A__ ( self ,A__):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase = self.sp_model.PieceToId(A__)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def A__ ( self ,A__):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def A__ ( self ,A__):
lowercase = ''''''.join(A__).replace(A__ ,''' ''').strip()
return out_string
def A__ ( self ,A__ ,A__ = None):
if not os.path.isdir(A__):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
lowercase = os.path.join(
A__ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(A__) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file ,A__)
elif not os.path.isfile(self.vocab_file):
with open(A__ ,'''wb''') as fi:
lowercase = self.sp_model.serialized_model_proto()
fi.write(A__)
return (out_vocab_file,)
def A__ ( self ,A__ ,A__ = None):
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
lowercase = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
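# Worked illustration of the fairseq/spm alignment documented in __init__:
# ids 0-4 are [PAD]/[CLS]/[SEP]/[UNK]/[MASK], ids 5-14 are [unused0]..[unused9],
# and every regular sentencepiece id is shifted by fairseq_offset = 12. A
# minimal sketch of the id conversion (values hard-coded above, shown for clarity):
def _spm_to_fairseq_id_sketch(spm_id: int, fairseq_offset: int = 12, unk_token_id: int = 3) -> int:
    # spm id 0 is '<unk>' and maps to the fairseq [UNK] id (3); the spm id 3
    # for ',' becomes 3 + 12 = 15, matching the alignment comment above.
    return spm_id + fairseq_offset if spm_id else unk_token_id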
| 97 | 0 |
def selection_sort(collection: list) -> list:
    """Sort ``collection`` in place with selection sort and return it."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
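# Selection sort performs O(n^2) comparisons but at most n - 1 swaps (hence
# the `if least != i` guard), e.g.:
#   selection_sort([64, 25, 12, 22, 11]) -> [11, 12, 22, 25, 64]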
| 138 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
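# Minimal usage sketch (assumes the public transformers names; not executed here):
#   from transformers import ViTMAEConfig, ViTMAEModel
#   configuration = ViTMAEConfig()        # facebook/vit-mae-base style defaults
#   model = ViTMAEModel(configuration)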
| 138 | 1 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_DESCRIPTION = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_KWARGS_DESCRIPTION = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
    def _compute(self, predictions: List[List[str]], references: List[List[List[str]]], min_len: int = 1, max_len: int = 4):
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
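# A pure-Python sketch of the sentence-level GLEU definition quoted in the
# description above (clipped 1-4 gram overlap, then min of precision and
# recall). Illustrative only; the metric class delegates to nltk's gleu_score.
from collections import Counter


def _sentence_gleu_sketch(hyp, ref, min_len=1, max_len=4):
    def ngrams(tokens):
        return Counter(
            tuple(tokens[i : i + n])
            for n in range(min_len, max_len + 1)
            for i in range(len(tokens) - n + 1)
        )

    hyp_counts, ref_counts = ngrams(hyp), ngrams(ref)
    matching = sum((hyp_counts & ref_counts).values())  # clipped n-gram matches
    precision = matching / max(sum(hyp_counts.values()), 1)
    recall = matching / max(sum(ref_counts.values()), 1)
    return min(precision, recall)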
| 121 |
from typing import Union

import fire
import torch
from tqdm import tqdm


def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved state dict to fp16 and write it back to disk."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
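# Example invocation via python-fire (script name and paths are placeholders):
#   python convert_model_to_fp16.py pytorch_model.bin --save_path model_fp16.bin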
| 121 | 1 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import torch


class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa: F401 - import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
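# Sketch of how this formatter is reached in practice (the dataset id is a
# placeholder): Dataset.set_format("torch") routes row/column/batch access
# through the extract -> decode -> recursive_tensorize pipeline above.
#   ds = load_dataset("user/dataset", split="train")
#   ds.set_format("torch")
#   ds[0]  # dict of torch.Tensors; equal-shaped tensor lists are stacked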
| 63 |
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count = defaultdict(int)

    # For each character, increment the count for the first string
    # and decrement it for the second string
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f'{input_a} and {input_b} are {"" if status else "not "}anagrams.')
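# Examples (case and whitespace are ignored):
#   check_anagrams("Silent", "Listen")                      -> True
#   check_anagrams("This is a string", "Is this a string")  -> True
#   check_anagrams("There", "Their")                        -> False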
| 49 | 0 |
from __future__ import annotations


def check_polygon(nums: list[float]) -> bool:
    """Return True if the given side lengths can close into a polygon."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
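# Illustration: a (3, 4, 5) triangle closes, while (1, 2, 10) cannot form a
# polygon because the longest side is not shorter than the sum of the rest:
#   check_polygon([3, 4, 5])  -> True
#   check_polygon([1, 2, 10]) -> False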
if __name__ == "__main__":
    import doctest

    doctest.testmod() | 34 |
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return ``number``'s multiplication table with ``number_of_terms`` lines."""
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )
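# Example: multiplication_table(number=3, number_of_terms=2) returns the
# two-line string "3 * 1 = 3\n3 * 2 = 6".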
if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10)) | 34 | 1 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
UpperCAmelCase_ : Any = logging.getLogger(__name__)
def load_and_quantize_model(model: torch.nn.Module, bnb_quantization_config: BnbQuantizationConfig, weights_location: Union[str, os.PathLike] = None, device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None, no_split_module_classes: Optional[List[str]] = None, max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None, offload_folder: Optional[Union[str, os.PathLike]] = None, offload_state_dict: bool = False) -> torch.nn.Module:
"""simple docstring"""
    load_in_8bit = bnb_quantization_config.load_in_8bit
    load_in_4bit = bnb_quantization_config.load_in_4bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.' )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.' )
a_ : List[Any] = []
# custom device map
if isinstance(__A , __A ) and len(device_map.keys() ) > 1:
a_ : Optional[int] = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
a_ : List[str] = get_keys_to_not_convert(__A )
# add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
bnb_quantization_config.skip_modules.extend(__A )
a_ : Union[str, Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
a_ : List[Any] = []
a_ : str = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(__A )
# compatibility with peft
a_ : Any = load_in_abit
a_ : List[Any] = load_in_abit
a_ : List[Any] = get_parameter_device(__A )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'It is not recommended to quantize a loaded model. '
'The model should be instantiated under the `init_empty_weights` context manager.' )
a_ : Any = replace_with_bnb_layers(__A , __A , modules_to_not_convert=__A )
# convert param to the right dtype
a_ : Union[str, Any] = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
a_ : int = name.replace('.weight' , '' ).replace('.bias' , '' )
a_ : int = getattr(__A , __A , __A )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(__A ):
param.to(__A )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info(
F"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
' We move the model to cuda.' )
return model
elif weights_location is None:
raise RuntimeError(
F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
a_ : List[Any] = replace_with_bnb_layers(
__A , __A , modules_to_not_convert=__A )
a_ : Any = get_quantized_model_device_map(
__A , __A , __A , max_memory=__A , no_split_module_classes=__A , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
a_ : Dict = True
a_ : Any = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
load_checkpoint_in_model(
            __A , __A , __A , dtype=bnb_quantization_config.torch_dtype , offload_folder=__A , offload_state_dict=__A , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_8bit_bnb=load_in_8bit and offload , )
return dispatch_model(__A , device_map=__A , offload_dir=__A )
def get_quantized_model_device_map(model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None):
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
a_ : Dict = {'': torch.cuda.current_device()}
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
        logger.info('The device_map was not initialized. ' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )
if isinstance(__A , __A ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
'\'sequential\'.' )
a_ : Any = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
a_ : Optional[Any] = {}
a_ : Union[str, Any] = special_dtypes
a_ : Optional[int] = no_split_module_classes
a_ : str = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
a_ : Optional[Any] = get_balanced_memory(
__A , low_zero=(device_map == 'balanced_low_0') , max_memory=__A , **__A , )
a_ : int = max_memory
a_ : Optional[int] = infer_auto_device_map(__A , **__A )
if isinstance(__A , __A ):
# check if don't have any quantized module on the cpu
a_ : str = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
a_ : Optional[Any] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
raise ValueError(
'\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
else:
logger.info(
                        'Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
del device_map_without_some_modules
return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
"""simple docstring"""
if modules_to_not_convert is None:
a_ : Union[str, Any] = []
a_ , a_ : List[str] = _replace_with_bnb_layers(
__A , __A , __A , __A )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
"""simple docstring"""
a_ : int = False
for name, module in model.named_children():
if current_key_name is None:
a_ : List[str] = []
current_key_name.append(__A )
if isinstance(__A , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
a_ : Union[str, Any] = '.'.join(__A )
a_ : Any = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
a_ : int = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=False , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
else:
raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' )
a_ : Union[str, Any] = module.weight.data
if module.bias is not None:
a_ : str = module.bias.data
bnb_module.requires_grad_(__A )
setattr(__A , __A , __A )
a_ : int = True
if len(list(module.children() ) ) > 0:
a_ , a_ : List[str] = _replace_with_bnb_layers(
__A , __A , __A , __A )
a_ : List[str] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def get_keys_to_not_convert(model):
"""simple docstring"""
with init_empty_weights():
a_ : List[str] = deepcopy(__A ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
a_ : List[str] = find_tied_parameters(__A )
# For compatibility with Accelerate < 0.18
if isinstance(__A , __A ):
a_ : Union[str, Any] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
a_ : Any = sum(__A , [] )
a_ : Optional[int] = len(__A ) > 0
# Check if it is a base model
a_ : Union[str, Any] = False
if hasattr(__A , 'base_model_prefix' ):
a_ : Union[str, Any] = not hasattr(__A , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
a_ : Tuple = list(model.named_children() )
a_ : str = [list_modules[-1][0]]
# add last module together with tied weights
a_ : List[str] = set(__A ) - set(__A )
a_ : Dict = list(set(__A ) ) + list(__A )
# remove ".weight" from the keys
a_ : List[str] = ['.weight', '.bias']
a_ : List[Any] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
a_ : Dict = name.replace(__A , '' )
filtered_module_names.append(__A )
return filtered_module_names
def has_4bit_bnb_layers(model: nn.Module) -> bool:
    """Check whether the model contains any bnb.nn.Linear4bit layer."""
    for m in model.modules():
        if isinstance(m , bnb.nn.Linear4bit ):
            return True
    return False
def get_parameter_device(parameter: nn.Module):
    """Return the device of the module's first parameter."""
    return next(parameter.parameters() ).device
def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] , __A : int , __A : Optional[Any] , __A : List[str] , __A : Dict , __A : Tuple , __A : Optional[Any] ) -> Dict:
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(__A , __A , 0 , dtype=__A , value=__A )
a_ : Optional[int] = param_name
a_ : List[str] = model
if "." in tensor_name:
a_ : int = tensor_name.split('.' )
for split in splits[:-1]:
a_ : int = getattr(__A , __A )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
a_ : Optional[Any] = new_module
a_ : List[str] = splits[-1]
# offload weights
a_ : Union[str, Any] = False
offload_weight(module._parameters[tensor_name] , __A , __A , index=__A )
if hasattr(module._parameters[tensor_name] , 'SCB' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , __A , index=__A , )
else:
offload_weight(__A , __A , __A , index=__A )
offload_weight(__A , param_name.replace('weight' , 'SCB' ) , __A , index=__A )
set_module_tensor_to_device(__A , __A , 'meta' , dtype=__A , value=torch.empty(*param.size() ) )
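# Typical entry point for the machinery above (the constructor and weights
# path are placeholders; the names follow accelerate's public API):
#   from accelerate import init_empty_weights
#   from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#   with init_empty_weights():
#       empty_model = build_model()  # hypothetical model constructor
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#   model = load_and_quantize_model(empty_model, bnb_config, weights_location="path/to/checkpoint")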
| 32 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
snake_case_ = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE__ :
A_ : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
A_ : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
A_ : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
A_ : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    A_ : bool = field(default=_UpperCAmelCase , metadata={'help': 'Whether to freeze the encoder.'} )
A_ : bool = field(default=_UpperCAmelCase , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class SCREAMING_SNAKE_CASE__ :
A_ : str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
A_ : Optional[str] = field(
default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
A_ : Optional[int] = field(
default=1_024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
A_ : Optional[int] = field(
default=128 , metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
A_ : Optional[int] = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
} , )
A_ : Optional[int] = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
A_ : Optional[int] = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
A_ : Optional[int] = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
A_ : Optional[int] = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
A_ : Optional[str] = field(default=_UpperCAmelCase , metadata={'help': 'Source language id for translation.'} )
A_ : Optional[str] = field(default=_UpperCAmelCase , metadata={'help': 'Target language id for translation.'} )
A_ : Optional[int] = field(default=_UpperCAmelCase , metadata={'help': '# num_beams to use for evaluation.'} )
A_ : bool = field(
default=_UpperCAmelCase , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def handle_metrics(split, metrics, output_dir):
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__snake_case , __snake_case , __snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__snake_case , __snake_case , __snake_case = parser.parse_args_into_dataclasses()
check_output_dir(snake_case_ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , snake_case_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__snake_case = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__snake_case = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(snake_case_ , snake_case_ , snake_case_ ):
assert hasattr(snake_case_ , snake_case_ ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(snake_case_ , snake_case_ , getattr(snake_case_ , snake_case_ ) )
__snake_case = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__snake_case = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=snake_case_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(snake_case_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
__snake_case = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(snake_case_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(snake_case_ , snake_case_ ):
__snake_case = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
__snake_case = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(snake_case_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
__snake_case = SeqaSeqDataset
# Get datasets
    train_dataset = (
        dataset_class(
            tokenizer , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task , tokenizer ) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model , args=training_args , data_args=data_args , train_dataset=train_dataset , eval_dataset=eval_dataset , data_collator=Seq2SeqDataCollator(
            tokenizer , data_args , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=compute_metrics_fn , tokenizer=tokenizer , )
    all_metrics = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        metrics = train_result.metrics
        metrics['''train_n_objs'''] = data_args.n_train
        trainer.save_model() # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics('''train''' , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
        metrics = trainer.evaluate(metric_key_prefix='''val''' )
        metrics['''val_n_objs'''] = data_args.n_val
        metrics['''val_loss'''] = round(metrics['''val_loss'''] , 4 )
        if trainer.is_world_process_zero():
            handle_metrics('''val''' , metrics , training_args.output_dir )
            all_metrics.update(metrics )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
        test_output = trainer.predict(test_dataset=test_dataset , metric_key_prefix='''test''' )
        metrics = test_output.metrics
        metrics['''test_n_objs'''] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics['''test_loss'''] = round(metrics['''test_loss'''] , 4 )
            handle_metrics('''test''' , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=True , clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip , test_preds )
                write_txt_file(test_preds , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
    if trainer.is_world_process_zero():
        save_json(all_metrics , os.path.join(training_args.output_dir , '''all_results.json''' ) )
return all_metrics
def _mp_fn( index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
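# A minimal launch sketch, assuming this script is saved as finetune_trainer.py;
# the checkpoint name and data directory below are illustrative only:
#
#   python finetune_trainer.py \
#       --model_name_or_path sshleifer/distilbart-xsum-12-3 \
#       --data_dir ./xsum --output_dir ./outputs \
#       --do_train --do_eval --predict_with_generate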
| 24 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vit_msn'''] = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
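# With the lazy module installed in sys.modules, a plain import only pulls in
# the torch-backed submodule on first attribute access. A usage sketch
# (assuming the package is importable as transformers):
#
#   from transformers import ViTMSNConfig, ViTMSNForImageClassification
#   model = ViTMSNForImageClassification(ViTMSNConfig())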
| 350 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ['''TOKENIZERS_PARALLELISM'''] = '''true'''
def get_basic_setup( accelerator , num_samples=82 , batch_size=16 ):
    '''simple docstring'''
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=num_samples )
    dataloader = DataLoader(dset , batch_size=batch_size )
    model.to(accelerator.device )
    ddp_model , dataloader = accelerator.prepare(ddp_model , dataloader )
    return model, ddp_model, dataloader
def get_dataloader( accelerator: Accelerator , use_longest=False ):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""" )
    dataset = load_dataset("""glue""" , """mrpc""" , split="""validation""" )
    def tokenize_function(examples ):
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        if use_longest:
            return tokenizer.pad(examples , padding="""longest""" , return_tensors="""pt""" )
        return tokenizer.pad(examples , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
    return DataLoader(tokenized_datasets , shuffle=False , collate_fn=collate_fn , batch_size=16 )
def get_mrpc_setup( dispatch_batches , split_batches ):
    '''simple docstring'''
    accelerator = Accelerator(dispatch_batches=dispatch_batches , split_batches=split_batches )
    dataloader = get_dataloader(accelerator , not dispatch_batches )
    model = AutoModelForSequenceClassification.from_pretrained(
        """hf-internal-testing/mrpc-bert-base-cased""" , return_dict=True )
    ddp_model , ddp_dataloader = accelerator.prepare(model , dataloader )
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions( model , dataloader , accelerator ):
    '''simple docstring'''
    logits_and_targets = []
    for batch in dataloader:
        input , target = batch.values()
        with torch.no_grad():
            logit = model(input )
            logit , target = accelerator.gather_for_metrics((logit, target) )
            logits_and_targets.append((logit, target) )
    logits , targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit )
        targs.append(targ )
    logits , targs = torch.cat(logits ), torch.cat(targs )
    return logits, targs
def test_torch_metrics( accelerator: Accelerator , num_samples=82 , dispatch_batches=False , split_batches=False , batch_size=16 ):
    '''simple docstring'''
    model , ddp_model , dataloader = get_basic_setup(accelerator , num_samples , batch_size )
    logits , targs = generate_predictions(ddp_model , dataloader , accelerator )
    assert (
        len(logits ) == num_samples
    ), F'''Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits )}'''
def test_mrpc( dispatch_batches: bool = False , split_batches: bool = False ):
    '''simple docstring'''
    metric = evaluate.load("""glue""" , """mrpc""" )
    setup , accelerator = get_mrpc_setup(dispatch_batches , split_batches )
    # First do baseline
    model , dataloader , device = setup["""no"""]
    model.to(device )
    model.eval()
    for batch in dataloader:
        batch.to(device )
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        metric.add_batch(predictions=preds , references=batch["""labels"""] )
    baseline = metric.compute()
    # Then do distributed
    model , dataloader , device = setup["""ddp"""]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        references = batch["""labels"""]
        preds , references = accelerator.gather_for_metrics((preds, references) )
        metric.add_batch(predictions=preds , references=references )
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def main():
    '''simple docstring'''
    accelerator = Accelerator(split_batches=False , dispatch_batches=False )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("""**Testing gather_for_metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
                test_mrpc(dispatch_batches , split_batches )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test torch metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches , dispatch_batches=dispatch_batches )
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
            test_torch_metrics(accelerator , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test last batch is not dropped when perfectly divisible**""" )
    accelerator = Accelerator()
    test_torch_metrics(accelerator , 512 )
accelerator.state._reset_state()
def _mp_fn( index ):
    '''simple docstring'''
    main()
if __name__ == "__main__":
main()
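# This module is meant to be run through the accelerate launcher so that several
# processes exercise gather_for_metrics; a typical invocation sketch, assuming
# the file is saved as test_metrics.py:
#
#   accelerate launch --num_processes 2 test_metrics.py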
| 59 | 0 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)
class PoolFormerFeatureExtractor( PoolFormerImageProcessor ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
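# Instantiating the deprecated shim still works but emits the FutureWarning
# above; new code should construct the image processor directly:
#
#   from transformers import PoolFormerImageProcessor
#   image_processor = PoolFormerImageProcessor()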
| 149 |
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """simple docstring"""
    def __init__( self ):
        '''simple docstring'''
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
    def circular_convolution( self ):
        '''simple docstring'''
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
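# A quick usage sketch; for the hard-coded signals above, the circular
# convolution of [2, 1, 2, -1] with [1, 2, 3, 4] works out to [10, 10, 6, 14]:
#
#   convolver = CircularConvolution()
#   print(convolver.circular_convolution())  # [10, 10, 6, 14]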
| 149 | 1 |
"""simple docstring"""
from __future__ import annotations
def depth_first_search( graph: dict , start: str ) -> set[str]:
    explored , stack = set(start ), [start]
    while stack:
        v = stack.pop()
        explored.add(v )
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v] ):
            if adj not in explored:
                stack.append(adj )
    return explored
G = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, "A"))
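# With the adjacency lists above every vertex is reachable from "A", so the call
# prints some ordering of {'A', 'B', 'C', 'D', 'E', 'F', 'G'} (set iteration
# order is not deterministic).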
| 108 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}
class Data2VecVisionConfig( PretrainedConfig ):
    model_type = """data2vec-vision"""
    def __init__( self , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=2_2_4 , patch_size=1_6 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 1_1] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=2_5_6 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=2_5_5 , **kwargs , )-> None:
        '''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("""1.11""" )
    @property
    def inputs( self )-> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])
    @property
    def atol_for_validation( self )-> float:
        '''simple docstring'''
        return 1e-4
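# A minimal configuration sketch relying only on the defaults defined above:
#
#   config = Data2VecVisionConfig()
#   assert (config.hidden_size, config.num_hidden_layers) == (768, 12)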
| 108 | 1 |
'''simple docstring'''
def greatest_common_divisor( a : int , b : int ) -> int:
    """simple docstring"""
    while a != 0:
        a , b = b % a, a
    return b
def mod_inverse( a : int , m : int ) -> int:
    """simple docstring"""
    if greatest_common_divisor(a , m ) != 1:
        msg = F'''mod inverse of {a!r} and {m!r} does not exist'''
        raise ValueError(msg )
    u1 , u2 , u3 = 1, 0, a
    v1 , v2 , v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1 , v2 , v3 , u1 , u2 , u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
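# A worked example: 7 * 15 = 105 = 4 * 26 + 1, so 15 is the inverse of 7 modulo
# 26, while gcd(4, 26) = 2 means that 4 has no inverse modulo 26:
#
#   mod_inverse(7, 26)  # 15
#   mod_inverse(4, 26)  # raises ValueError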
| 53 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ConvNextImageProcessor( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ["""pixel_values"""]
def __init__( self : Dict , _A : bool = True , _A : Dict[str, int] = None , _A : float = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : int , ) -> None:
super().__init__(**_A )
__magic_name__ : List[str] = size if size is not None else {'shortest_edge': 384}
__magic_name__ : Dict = get_size_dict(_A , default_to_square=_A )
__magic_name__ : List[Any] = do_resize
__magic_name__ : str = size
# Default value set here for backwards compatibility where the value in config is None
__magic_name__ : Optional[Any] = crop_pct if crop_pct is not None else 224 / 256
__magic_name__ : int = resample
__magic_name__ : List[str] = do_rescale
__magic_name__ : List[Any] = rescale_factor
__magic_name__ : str = do_normalize
__magic_name__ : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__magic_name__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : float , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> np.ndarray:
__magic_name__ : Optional[int] = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" not in size:
raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
__magic_name__ : Dict = size['shortest_edge']
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
__magic_name__ : Dict = int(shortest_edge / crop_pct )
__magic_name__ : str = get_resize_output_image_size(_A , size=_A , default_to_square=_A )
__magic_name__ : Optional[int] = resize(image=_A , size=_A , resample=_A , data_format=_A , **_A )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=_A , size=(shortest_edge, shortest_edge) , data_format=_A , **_A )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
_A , size=(shortest_edge, shortest_edge) , resample=_A , data_format=_A , **_A )
def __lowerCAmelCase ( self : int , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> int:
return rescale(_A , scale=_A , data_format=_A , **_A )
def __lowerCAmelCase ( self : List[Any] , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> np.ndarray:
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def __lowerCAmelCase ( self : Optional[Any] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : float = None , _A : PILImageResampling = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ) -> PIL.Image.Image:
__magic_name__ : int = do_resize if do_resize is not None else self.do_resize
__magic_name__ : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct
__magic_name__ : Optional[Any] = resample if resample is not None else self.resample
__magic_name__ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
__magic_name__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
__magic_name__ : str = do_normalize if do_normalize is not None else self.do_normalize
__magic_name__ : str = image_mean if image_mean is not None else self.image_mean
__magic_name__ : Dict = image_std if image_std is not None else self.image_std
__magic_name__ : Dict = size if size is not None else self.size
__magic_name__ : List[Any] = get_size_dict(_A , default_to_square=_A )
__magic_name__ : int = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError('crop_pct must be specified if size < 384.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__magic_name__ : Optional[Any] = [to_numpy_array(_A ) for image in images]
if do_resize:
__magic_name__ : List[str] = [self.resize(image=_A , size=_A , crop_pct=_A , resample=_A ) for image in images]
if do_rescale:
__magic_name__ : Tuple = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
__magic_name__ : int = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
__magic_name__ : Tuple = [to_channel_dimension_format(_A , _A ) for image in images]
__magic_name__ : Union[str, Any] = {'pixel_values': images}
        return BatchFeature(data=_A , tensor_type=_A )
| 331 | 0 |
"""simple docstring"""
def sum_of_digits( n: int ) -> int:
    n = abs(n )
    res = 0
    while n > 0:
        res += n % 1_0
        n //= 1_0
    return res
def sum_of_digits_recursion( n: int ) -> int:
    n = abs(n )
    return n if n < 1_0 else n % 1_0 + sum_of_digits_recursion(n // 1_0 )
def sum_of_digits_compact( n: int ) -> int:
    return sum(int(c ) for c in str(abs(n ) ) )
def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable , value: int ) -> None:
        call = f'{func.__name__}({value})'
        timing = timeit(f'__main__.{call}' , setup='import __main__' )
        print(f'{call:56} = {func(value )} -- {timing:.4f} seconds' )
    for value in (2_6_2_1_4_4, 1_1_2_5_8_9_9_9_0_6_8_4_2_6_2_4, 1_2_6_7_6_5_0_6_0_0_2_2_8_2_2_9_4_0_1_4_9_6_7_0_3_2_0_5_3_7_6):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func , value )
    print()
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
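# A quick sanity check of the three equivalent implementations:
#
#   sum_of_digits(12345)            # 15
#   sum_of_digits_recursion(12345)  # 15
#   sum_of_digits_compact(12345)    # 15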
| 263 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig( PretrainedConfig ):
    model_type = "wavlm"
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.0_2 , layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , num_buckets=320 , max_bucket_distance=800 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.0_5 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , num_codevectors_per_group=320 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=100 , codevector_dim=256 , proj_codevector_dim=256 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , tdnn_dim=(512, 512, 512, 512, 1500) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=512 , num_ctc_classes=80 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , add_adapter=False , adapter_kernel_size=3 , adapter_stride=2 , num_adapter_layers=3 , output_hidden_size=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
                F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio( self ):
        '''simple docstring'''
        return functools.reduce(operator.mul , self.conv_stride , 1 )
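# A minimal usage sketch with the defaults above; conv_stride multiplies out to
# 5 * 2**6 = 320 input samples per logit frame:
#
#   config = WavLMConfig()
#   print(config.inputs_to_logits_ratio)  # 320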
| 263 | 1 |
import math
def proth( number: int ) -> int:
    if not isinstance(number , int ):
        msg = F'Input value of [number={number}] must be an integer'
        raise TypeError(msg )
    if number < 1:
        msg = F'Input value of [number={number}] must be > 0'
        raise ValueError(msg )
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3 , 2 ) ) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1 , block_index ):
            for _ in range(increment ):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
        value = 0
        try:
            value = proth(number)
except ValueError:
print(f"""ValueError: there is no {number}th Proth number""")
continue
print(f"""The {number}th Proth number: {value}""")
| 62 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
    },
    'monolingual_vocab_file': {
        'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'vinai/bartpho-syllable': 1024}
class BartphoTokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , monolingual_vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs = None , **kwargs , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token ) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token )] = cnt
                cnt += 1
        with open(monolingual_vocab_file , 'r' , encoding='utf-8' ) as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids )
        if str(mask_token ) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token )] = len(self.fairseq_tokens_to_ids )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Any:
__UpperCamelCase =self.__dict__.copy()
__UpperCamelCase =None
__UpperCamelCase =self.sp_model.serialized_model_proto()
return state
def __setstate__( self , A_ ) -> List[str]:
__UpperCamelCase =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__UpperCamelCase ={}
__UpperCamelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _a ( self , A_ , A_ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCamelCase =[self.cls_token_id]
__UpperCamelCase =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _a ( self , A_ , A_ = None , A_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def _a ( self , A_ , A_ = None ) -> List[int]:
__UpperCamelCase =[self.sep_token_id]
__UpperCamelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _a ( self ) -> Any:
return len(self.fairseq_ids_to_tokens )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase ={self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _a ( self , A_ ) -> List[str]:
return self.sp_model.encode(A_ , out_type=A_ )
def _a ( self , A_ ) -> str:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def _a ( self , A_ ) -> int:
return self.fairseq_ids_to_tokens[index]
def _a ( self , A_ ) -> List[Any]:
__UpperCamelCase =''.join(A_ ).replace(A_ , ' ' ).strip()
return out_string
def _a ( self , A_ , A_ = None ) -> Tuple[str]:
if not os.path.isdir(A_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__UpperCamelCase =os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase =os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ , 'wb' ) as fi:
__UpperCamelCase =self.sp_model.serialized_model_proto()
fi.write(A_ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
A_ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , A_ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(A_ , 'w' , encoding='utf-8' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'{str(A_ )} \n' )
return out_vocab_file, out_monolingual_vocab_file
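# A minimal usage sketch against the vinai checkpoint referenced in the maps
# above (downloading the model files requires network access):
#
#   tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
#   ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]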
| 62 | 1 |
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
consumer_key = ''''''
consumer_secret = ''''''
access_key = ''''''
access_secret = ''''''
def get_all_tweets(screen_name: str ) -> None:
    '''simple docstring'''
    # authorize twitter and initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key , consumer_secret )
    auth.set_access_token(access_key , access_secret )
    api = tweepy.API(auth )
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name , count=200 )
    # save most recent tweets
    alltweets.extend(new_tweets )
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets ) > 0:
        print(F"""getting tweets before {oldest}""" )
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name , count=200 , max_id=oldest )
        # save most recent tweets
        alltweets.extend(new_tweets )
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(F"""...{len(alltweets )} tweets downloaded so far""" )
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(F"""new_{screen_name}_tweets.csv""" , "w" ) as f:
        writer = csv.writer(f )
        writer.writerow(["id", "created_at", "text"] )
        writer.writerows(outtweets )
if __name__ == "__main__":
# pass in the username of the account you want to download
    get_all_tweets('''FirePing32''')
| 64 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys( config , base_model=False ):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    '''simple docstring'''
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def remove_projection_head( state_dict ):
    '''simple docstring'''
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def convert_vit_msn_checkpoint( checkpoint_url , pytorch_dump_folder_path ):
    '''simple docstring'''
    config = ViTMSNConfig()
    config.num_labels = 1000
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size )
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config , base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model=True )
    model.load_state_dict(state_dict )
    model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image , return_tensors="pt" )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]] )
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]] )
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3] , expected_slice , atol=1E-4 )
    print(F"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
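# A typical invocation sketch; the checkpoint URL matches the default declared
# below, while the script name and output path are illustrative:
#
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small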
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__A = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 64 | 1 |
"""simple docstring"""
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
__A = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
__A = get_tests_dir("fixtures/vocab.json")
__A = get_tests_dir("fixtures")
class lowerCamelCase__ ( unittest.TestCase ):
    vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Optional[Any] = 0
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : str = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case : Tuple = WavaVecaConfig()
snake_case : Any = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
# save in new folder
model_config.save_pretrained(SCREAMING_SNAKE_CASE )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
snake_case : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , "vocab.json" ) )
snake_case : str = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case : Tuple = WavaVecaFeatureExtractor()
snake_case : List[str] = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
snake_case : List[str] = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# save in new folder
processor.save_pretrained(SCREAMING_SNAKE_CASE )
# drop `processor_class` in tokenizer
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , "r" ) as f:
snake_case : Any = json.load(SCREAMING_SNAKE_CASE )
config_dict.pop("processor_class" )
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , "w" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE ) )
snake_case : Any = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case : List[str] = WavaVecaFeatureExtractor()
snake_case : Dict = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
snake_case : Optional[int] = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# save in new folder
processor.save_pretrained(SCREAMING_SNAKE_CASE )
# drop `processor_class` in feature extractor
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , "r" ) as f:
snake_case : Optional[Any] = json.load(SCREAMING_SNAKE_CASE )
config_dict.pop("processor_class" )
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , "w" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE ) )
snake_case : Dict = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case : List[Any] = WavaVecaConfig(processor_class="Wav2Vec2Processor" )
model_config.save_pretrained(SCREAMING_SNAKE_CASE )
# copy relevant files
copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , "vocab.json" ) )
# create emtpy sample processor
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , "w" ) as f:
f.write("{}" )
snake_case : Tuple = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
"""simple docstring"""
with self.assertRaises(SCREAMING_SNAKE_CASE ):
snake_case : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(SCREAMING_SNAKE_CASE ):
snake_case : Optional[int] = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=SCREAMING_SNAKE_CASE )
snake_case : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
snake_case : Tuple = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
snake_case : int = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
snake_case : Optional[Any] = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE )
snake_case : int = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
try:
AutoConfig.register("custom" , SCREAMING_SNAKE_CASE )
AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE )
AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(SCREAMING_SNAKE_CASE ):
AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Now that the config is registered, it can be used as any other config with the auto-API
snake_case : Optional[Any] = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case : List[str] = os.path.join(SCREAMING_SNAKE_CASE , "vocab.txt" )
with open(SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case : List[Any] = CustomTokenizer(SCREAMING_SNAKE_CASE )
snake_case : Tuple = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(SCREAMING_SNAKE_CASE )
snake_case : str = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCamelCase_ ( self ):
"""simple docstring"""
class lowerCamelCase__ ( lowerCamelCase_ ):
a__ : Optional[int] = False
class lowerCamelCase__ ( lowerCamelCase_ ):
a__ : List[Any] = False
class lowerCamelCase__ ( lowerCamelCase_ ):
a__ : Tuple = """AutoFeatureExtractor"""
a__ : Union[str, Any] = """AutoTokenizer"""
a__ : int = False
try:
AutoConfig.register("custom" , SCREAMING_SNAKE_CASE )
AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE )
AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local classes.
snake_case : int = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
snake_case : Optional[int] = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
snake_case : Tuple = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCamelCase_ ( self ):
"""simple docstring"""
processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" )
self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" )
@is_staging_test
class lowerCamelCase__ ( unittest.TestCase ):
a__ : int = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def lowerCamelCase_ ( cls ):
"""simple docstring"""
cls._token = TOKEN
HfFolder.save_token(TOKEN )
@classmethod
def lowerCamelCase_ ( cls ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id="test-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-processor" )
except HTTPError:
pass
def lowerCamelCase_ ( self ):
"""simple docstring"""
processor = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(tmp_dir , "test-processor" ) , push_to_hub=True , use_auth_token=self._token )
new_processor = WavaVecaProcessor.from_pretrained(F'''{USER}/test-processor''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(v , getattr(new_processor.feature_extractor , k ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowerCamelCase_ ( self ):
"""simple docstring"""
processor = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(tmp_dir , "test-processor-org" ) , push_to_hub=True , use_auth_token=self._token , organization="valid_org" , )
new_processor = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(v , getattr(new_processor.feature_extractor , k ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowerCamelCase_ ( self ):
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
feature_extractor = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
vocab_file = os.path.join(tmp_dir , "vocab.txt" )
with open(vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
tokenizer = CustomTokenizer(vocab_file )
processor = CustomProcessor(feature_extractor , tokenizer )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F'''{USER}/test-dynamic-processor''' , token=self._token )
repo = Repository(tmp_dir , clone_from=F'''{USER}/test-dynamic-processor''' , token=self._token )
processor.save_pretrained(tmp_dir )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(tmp_dir , "tokenizer_config.json" ) ) as f:
tokenizer_config = json.load(f )
self.assertDictEqual(
tokenizer_config["auto_map"] , {
"AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(tmp_dir , "custom_feature_extraction.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(tmp_dir , "custom_tokenization.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(tmp_dir , "custom_processing.py" ) ) )
repo.push_to_hub()
new_processor = AutoProcessor.from_pretrained(F'''{USER}/test-dynamic-processor''' , trust_remote_code=True )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
| 148 |
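# A minimal, self-contained sketch of the register-then-clean-up pattern the
# tests above exercise. CustomConfig/CustomFeatureExtractor/CustomTokenizer/
# CustomProcessor are assumed test fixtures, not part of the transformers API.
from transformers import CONFIG_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer

try:
    AutoConfig.register("custom", CustomConfig)
    AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
    AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
    AutoProcessor.register(CustomConfig, CustomProcessor)
    # any checkpoint whose config has model_type == "custom" now resolves to
    # the custom classes through the auto API
finally:
    # registration mutates global mappings, so tests always undo it
    if "custom" in CONFIG_MAPPING._extra_content:
        del CONFIG_MAPPING._extra_content["custom"]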
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class _UpperCamelCase ( unittest.TestCase ):
def one_complete_example ( self :Dict , complete_file_name :str , parser_only :bool , secondary_filename :str = None , special_strings :list = None ) -> Tuple:
self.maxDiff = None
by_feature_path = os.path.abspath(os.path.join("examples" , "by_feature" ) )
examples_path = os.path.abspath("examples" )
for item in os.listdir(by_feature_path ):
if item not in EXCLUDE_EXAMPLES:
item_path = os.path.join(by_feature_path , item )
if os.path.isfile(item_path ) and ".py" in item_path:
with self.subTest(
tested_script=complete_file_name , feature_script=item , tested_section="main()" if parser_only else "training_function()" , ):
diff = compare_against_test(
os.path.join(examples_path , complete_file_name ) , item_path , parser_only , secondary_filename )
diff = "\n".join(diff )
if special_strings is not None:
for string in special_strings:
diff = diff.replace(string , "" )
self.assertEqual(diff , "" )
def UpperCAmelCase_ ( self :List[str] ) -> Any:
self.one_complete_example("complete_nlp_example.py" , lowerCamelCase )
self.one_complete_example("complete_nlp_example.py" , lowerCamelCase )
def UpperCAmelCase_ ( self :str ) -> int:
UpperCAmelCase__ = os.path.abspath(os.path.join("examples" , "cv_example.py" ) )
UpperCAmelCase__ = [
" " * 16 + "{\n\n",
" " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
" " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
" " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
" " * 20 + "\"epoch\": epoch,\n\n",
" " * 16 + "},\n\n",
" " * 16 + "step=epoch,\n",
" " * 12,
" " * 8 + "for step, batch in enumerate(active_dataloader):\n",
]
self.one_complete_example("complete_cv_example.py" , lowerCamelCase , lowerCamelCase , lowerCamelCase )
self.one_complete_example("complete_cv_example.py" , lowerCamelCase , lowerCamelCase , lowerCamelCase )
@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} )
class _UpperCamelCase ( TempDirTestCase ):
UpperCAmelCase_ = False
@classmethod
def UpperCAmelCase_ ( cls :List[Any] ) -> Any:
super().setUpClass()
cls._tmpdir = tempfile.mkdtemp()
cls.configPath = os.path.join(cls._tmpdir , "default_config.yml" )
write_basic_config(save_location=cls.configPath )
cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def UpperCAmelCase_ ( cls :Union[str, Any] ) -> Optional[int]:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def UpperCAmelCase_ ( self :Dict ) -> Dict:
testargs = f'''
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "epoch_0" ) ) )
def UpperCAmelCase_ ( self :Optional[int] ) -> Any:
testargs = f'''
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
'''.split()
UpperCAmelCase__ = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "step_2" ) ) )
def UpperCAmelCase_ ( self :Tuple ) -> Dict:
testargs = f'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
'''.split()
output = run_command(self._launch_args + testargs , return_stdout=True )
self.assertNotIn("epoch 0:" , output )
self.assertIn("epoch 1:" , output )
def UpperCAmelCase_ ( self :Dict ) -> int:
testargs = f'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
'''.split()
output = run_command(self._launch_args + testargs , return_stdout=True )
if torch.cuda.is_available():
num_processes = torch.cuda.device_count()
else:
num_processes = 1
if num_processes > 1:
self.assertNotIn("epoch 0:" , output )
self.assertIn("epoch 1:" , output )
else:
self.assertIn("epoch 0:" , output )
self.assertIn("epoch 1:" , output )
@slow
def UpperCAmelCase_ ( self :Dict ) -> Optional[int]:
UpperCAmelCase__ = "\n examples/by_feature/cross_validation.py\n --num_folds 2\n ".split()
with mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "0"} ):
output = run_command(self._launch_args + testargs , return_stdout=True )
results = re.findall("({.+})" , output )
results = [r for r in results if "accuracy" in r][-1]
results = ast.literal_eval(results )
self.assertGreaterEqual(results["accuracy"] , 0.75 )
def UpperCAmelCase_ ( self :int ) -> Optional[int]:
UpperCAmelCase__ = ["examples/by_feature/multi_process_metrics.py"]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def UpperCAmelCase_ ( self :List[Any] ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
testargs = f'''
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(tmpdir , "tracking" ) ) )
def UpperCAmelCase_ ( self :Any ) -> Dict:
UpperCAmelCase__ = ["examples/by_feature/gradient_accumulation.py"]
run_command(self._launch_args + testargs )
def UpperCAmelCase_ ( self :Any ) -> Optional[int]:
UpperCAmelCase__ = ["examples/by_feature/local_sgd.py"]
run_command(self._launch_args + testargs )
| 169 | 0 |
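# The launch recipe the tests above share, in isolation: write a basic
# single-process accelerate config once, then prefix every example script with
# `accelerate launch --config_file <path>`. The script path is a placeholder.
import os
import subprocess
import tempfile

from accelerate.utils import write_basic_config

tmpdir = tempfile.mkdtemp()
config_path = os.path.join(tmpdir, "default_config.yml")
write_basic_config(save_location=config_path)
launch_args = ["accelerate", "launch", "--config_file", config_path]
testargs = ["examples/by_feature/checkpointing.py", "--checkpointing_steps", "epoch"]
subprocess.run(launch_args + testargs, check=True)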
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = 'CompVis/stable-diffusion-v1-1'
pipe2_model_id = 'CompVis/stable-diffusion-v1-2'
pipe3_model_id = 'CompVis/stable-diffusion-v1-3'
pipe4_model_id = 'CompVis/stable-diffusion-v1-4'
class A__ ( UpperCamelCase ):
"""simple docstring"""
def __init__( self : str , lowerCAmelCase__ : AutoencoderKL , lowerCAmelCase__ : CLIPTextModel , lowerCAmelCase__ : CLIPTokenizer , lowerCAmelCase__ : UNetaDConditionModel , lowerCAmelCase__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCAmelCase__ : StableDiffusionSafetyChecker , lowerCAmelCase__ : CLIPImageProcessor , lowerCAmelCase__ : bool = True , ) -> Optional[int]:
"""simple docstring"""
super().__init__()
self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id )
self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id )
self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id )
self.pipe4 = StableDiffusionPipeline(
vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , requires_safety_checker=lowerCAmelCase__ , )
self.register_modules(pipeline1=self.pipe1 , pipeline2=self.pipe2 , pipeline3=self.pipe3 , pipeline4=self.pipe4 )
@property
def _lowerCAmelCase ( self : int ) -> Dict[str, Any]:
"""simple docstring"""
return {k: getattr(self , lowerCAmelCase__ ) for k in self.config.keys() if not k.startswith("_" )}
def _lowerCAmelCase ( self : Tuple , lowerCAmelCase__ : Optional[Union[str, int]] = "auto" ) -> Any:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_UpperCAmelCase : str = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCAmelCase__ )
def _lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
self.enable_attention_slicing(lowerCAmelCase__ )
@torch.no_grad()
def _lowerCAmelCase ( self : List[str] , lowerCAmelCase__ : Union[str, List[str]] , lowerCAmelCase__ : int = 5_1_2 , lowerCAmelCase__ : int = 5_1_2 , lowerCAmelCase__ : int = 5_0 , lowerCAmelCase__ : float = 7.5 , lowerCAmelCase__ : Optional[Union[str, List[str]]] = None , lowerCAmelCase__ : Optional[int] = 1 , lowerCAmelCase__ : float = 0.0 , lowerCAmelCase__ : Optional[torch.Generator] = None , lowerCAmelCase__ : Optional[torch.FloatTensor] = None , lowerCAmelCase__ : Optional[str] = "pil" , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase__ : int = 1 , **lowerCAmelCase__ : Union[str, Any] , ) -> Any:
"""simple docstring"""
return self.pipea(
prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , )
@torch.no_grad()
def _lowerCAmelCase ( self : List[Any] , lowerCAmelCase__ : Union[str, List[str]] , lowerCAmelCase__ : int = 5_1_2 , lowerCAmelCase__ : int = 5_1_2 , lowerCAmelCase__ : int = 5_0 , lowerCAmelCase__ : float = 7.5 , lowerCAmelCase__ : Optional[Union[str, List[str]]] = None , lowerCAmelCase__ : Optional[int] = 1 , lowerCAmelCase__ : float = 0.0 , lowerCAmelCase__ : Optional[torch.Generator] = None , lowerCAmelCase__ : Optional[torch.FloatTensor] = None , lowerCAmelCase__ : Optional[str] = "pil" , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase__ : int = 1 , **lowerCAmelCase__ : Any , ) -> Dict:
"""simple docstring"""
return self.pipea(
prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , )
@torch.no_grad()
def _lowerCAmelCase ( self : str , lowerCAmelCase__ : Union[str, List[str]] , lowerCAmelCase__ : int = 5_1_2 , lowerCAmelCase__ : int = 5_1_2 , lowerCAmelCase__ : int = 5_0 , lowerCAmelCase__ : float = 7.5 , lowerCAmelCase__ : Optional[Union[str, List[str]]] = None , lowerCAmelCase__ : Optional[int] = 1 , lowerCAmelCase__ : float = 0.0 , lowerCAmelCase__ : Optional[torch.Generator] = None , lowerCAmelCase__ : Optional[torch.FloatTensor] = None , lowerCAmelCase__ : Optional[str] = "pil" , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase__ : int = 1 , **lowerCAmelCase__ : int , ) -> Dict:
"""simple docstring"""
return self.pipea(
prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , )
@torch.no_grad()
def _lowerCAmelCase ( self : int , lowerCAmelCase__ : Union[str, List[str]] , lowerCAmelCase__ : int = 5_1_2 , lowerCAmelCase__ : int = 5_1_2 , lowerCAmelCase__ : int = 5_0 , lowerCAmelCase__ : float = 7.5 , lowerCAmelCase__ : Optional[Union[str, List[str]]] = None , lowerCAmelCase__ : Optional[int] = 1 , lowerCAmelCase__ : float = 0.0 , lowerCAmelCase__ : Optional[torch.Generator] = None , lowerCAmelCase__ : Optional[torch.FloatTensor] = None , lowerCAmelCase__ : Optional[str] = "pil" , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase__ : int = 1 , **lowerCAmelCase__ : int , ) -> Optional[Any]:
"""simple docstring"""
return self.pipea(
prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , )
@torch.no_grad()
def _lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase__ : Union[str, List[str]] , lowerCAmelCase__ : int = 5_1_2 , lowerCAmelCase__ : int = 5_1_2 , lowerCAmelCase__ : int = 5_0 , lowerCAmelCase__ : float = 7.5 , lowerCAmelCase__ : Optional[Union[str, List[str]]] = None , lowerCAmelCase__ : Optional[int] = 1 , lowerCAmelCase__ : float = 0.0 , lowerCAmelCase__ : Optional[torch.Generator] = None , lowerCAmelCase__ : Optional[torch.FloatTensor] = None , lowerCAmelCase__ : Optional[str] = "pil" , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase__ : int = 1 , **lowerCAmelCase__ : str , ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = "cuda" if torch.cuda.is_available() else "cpu"
self.to(lowerCAmelCase__ )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
_UpperCAmelCase : Tuple = self.textaimg_sda_a(
prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , )
# Get first result from Stable Diffusion Checkpoint v1.2
_UpperCAmelCase : Dict = self.textaimg_sda_a(
prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , )
# Get first result from Stable Diffusion Checkpoint v1.3
_UpperCAmelCase : Tuple = self.textaimg_sda_a(
prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , )
# Get first result from Stable Diffusion Checkpoint v1.4
_UpperCAmelCase : Optional[Any] = self.textaimg_sda_a(
prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] ) | 17 | '''simple docstring'''
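# Hedged usage sketch for the four-checkpoint comparison pipeline above; loading
# it via the community pipeline id "stable_diffusion_comparison" is an
# assumption, as is the prompt.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="stable_diffusion_comparison",  # assumed registration name
    torch_dtype=torch.float16,
)
pipe.enable_attention_slicing()  # the memory/speed trade-off implemented above
result = pipe(prompt="an astronaut riding a horse", num_inference_steps=25)
# result.images holds one image per v1-x checkpoint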
from math import factorial
def __UpperCAmelCase ( a_: int = 100 ):
return sum(map(int, str(factorial(a_ ) ) ) )
if __name__ == "__main__":
print(__UpperCAmelCase(int(input('Enter the Number: ').strip()))) | 17 | 1 |
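# Quick checks for the digit-sum helper above (Project Euler problem 20):
from math import factorial
assert sum(map(int, str(factorial(10)))) == 27    # 10! = 3628800 -> 3+6+2+8+8
assert sum(map(int, str(factorial(100)))) == 648  # the published answer for 100!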
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def snake_case ( self , __UpperCAmelCase=0 ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = np.random.RandomState(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = self.get_dummy_inputs()
lowerCAmelCase__ :Union[str, Any] = pipe(**__UpperCAmelCase ).images
lowerCAmelCase__ :str = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
lowerCAmelCase__ :Any = np.array([0.6_50_72, 0.5_84_92, 0.4_82_19, 0.5_55_21, 0.5_31_80, 0.5_59_39, 0.5_06_97, 0.3_98_00, 0.4_64_55] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowerCAmelCase__ :Dict = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = self.get_dummy_inputs()
lowerCAmelCase__ :Tuple = pipe(**__UpperCAmelCase ).images
lowerCAmelCase__ :Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
lowerCAmelCase__ :List[Any] = np.array([0.6_58_63, 0.5_94_25, 0.4_93_26, 0.5_63_13, 0.5_38_75, 0.5_66_27, 0.5_10_65, 0.3_97_77, 0.4_63_30] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowerCAmelCase__ :Any = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ :int = self.get_dummy_inputs()
lowerCAmelCase__ :List[Any] = pipe(**__UpperCAmelCase ).images
lowerCAmelCase__ :List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
lowerCAmelCase__ :Optional[Any] = np.array([0.5_37_55, 0.6_07_86, 0.4_74_02, 0.4_94_88, 0.5_18_69, 0.4_98_19, 0.4_79_85, 0.3_89_57, 0.4_42_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowerCAmelCase__ :str = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ :List[str] = self.get_dummy_inputs()
lowerCAmelCase__ :List[Any] = pipe(**__UpperCAmelCase ).images
lowerCAmelCase__ :List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
lowerCAmelCase__ :Union[str, Any] = np.array([0.5_37_55, 0.6_07_86, 0.4_74_02, 0.4_94_88, 0.5_18_69, 0.4_98_19, 0.4_79_85, 0.3_89_57, 0.4_42_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowerCAmelCase__ :Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ :Tuple = self.get_dummy_inputs()
lowerCAmelCase__ :Union[str, Any] = pipe(**__UpperCAmelCase ).images
lowerCAmelCase__ :int = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
lowerCAmelCase__ :Tuple = np.array([0.5_38_17, 0.6_08_12, 0.4_73_84, 0.4_95_30, 0.5_18_94, 0.4_98_14, 0.4_79_84, 0.3_89_58, 0.4_42_71] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowerCAmelCase__ :int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = self.get_dummy_inputs()
lowerCAmelCase__ :Dict = pipe(**__UpperCAmelCase ).images
lowerCAmelCase__ :Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
lowerCAmelCase__ :List[Any] = np.array([0.5_38_95, 0.6_08_08, 0.4_79_33, 0.4_96_08, 0.5_18_86, 0.4_99_50, 0.4_80_53, 0.3_89_57, 0.4_42_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ :str = self.get_dummy_inputs()
lowerCAmelCase__ :List[str] = 3 * [inputs['prompt']]
# forward
lowerCAmelCase__ :Dict = pipe(**__UpperCAmelCase )
lowerCAmelCase__ :str = output.images[0, -3:, -3:, -1]
lowerCAmelCase__ :List[Any] = self.get_dummy_inputs()
lowerCAmelCase__ :Optional[Any] = 3 * [inputs.pop('prompt' )]
lowerCAmelCase__ :Tuple = pipe.tokenizer(
__UpperCAmelCase , padding='max_length' , max_length=pipe.tokenizer.model_max_length , truncation=__UpperCAmelCase , return_tensors='np' , )
lowerCAmelCase__ :Optional[Any] = text_inputs['input_ids']
lowerCAmelCase__ :Optional[Any] = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
lowerCAmelCase__ :Tuple = prompt_embeds
# forward
lowerCAmelCase__ :Any = pipe(**__UpperCAmelCase )
lowerCAmelCase__ :List[str] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = self.get_dummy_inputs()
lowerCAmelCase__ :Optional[Any] = 3 * ['this is a negative prompt']
lowerCAmelCase__ :Optional[Any] = negative_prompt
lowerCAmelCase__ :List[Any] = 3 * [inputs['prompt']]
# forward
lowerCAmelCase__ :Optional[Any] = pipe(**__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = output.images[0, -3:, -3:, -1]
lowerCAmelCase__ :str = self.get_dummy_inputs()
lowerCAmelCase__ :int = 3 * [inputs.pop('prompt' )]
lowerCAmelCase__ :Any = []
for p in [prompt, negative_prompt]:
lowerCAmelCase__ :Tuple = pipe.tokenizer(
__UpperCAmelCase , padding='max_length' , max_length=pipe.tokenizer.model_max_length , truncation=__UpperCAmelCase , return_tensors='np' , )
lowerCAmelCase__ :Any = text_inputs['input_ids']
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
lowerCAmelCase__ , lowerCAmelCase__ :str = embeds
# forward
lowerCAmelCase__ :str = pipe(**__UpperCAmelCase )
lowerCAmelCase__ :Dict = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def snake_case ( self ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = ort.SessionOptions()
lowerCAmelCase__ :str = False
return options
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Any = OnnxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = 'A painting of a squirrel eating a burger'
np.random.seed(0 )
lowerCAmelCase__ :Optional[Any] = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=1_0 , output_type='np' )
lowerCAmelCase__ :Optional[Any] = output.images
lowerCAmelCase__ :Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase__ :Tuple = np.array([0.04_52, 0.03_90, 0.00_87, 0.03_50, 0.06_17, 0.03_64, 0.05_44, 0.05_23, 0.07_20] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = DDIMScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
lowerCAmelCase__ :List[str] = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = 'open neural network exchange'
lowerCAmelCase__ :Any = np.random.RandomState(0 )
lowerCAmelCase__ :Optional[int] = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=1_0 , generator=__UpperCAmelCase , output_type='np' )
lowerCAmelCase__ :int = output.images
lowerCAmelCase__ :Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase__ :Dict = np.array([0.28_67, 0.19_74, 0.14_81, 0.72_94, 0.72_51, 0.66_67, 0.41_94, 0.56_42, 0.64_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
lowerCAmelCase__ :Tuple = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = 'open neural network exchange'
lowerCAmelCase__ :str = np.random.RandomState(0 )
lowerCAmelCase__ :Optional[Any] = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=1_0 , generator=__UpperCAmelCase , output_type='np' )
lowerCAmelCase__ :Any = output.images
lowerCAmelCase__ :Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase__ :List[str] = np.array([0.23_06, 0.19_59, 0.15_93, 0.65_49, 0.63_94, 0.54_08, 0.50_65, 0.60_10, 0.61_61] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = 0
def test_callback_fn(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> None:
lowerCAmelCase__ :Optional[int] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 6_4, 6_4)
lowerCAmelCase__ :Optional[Any] = latents[0, -3:, -3:, -1]
lowerCAmelCase__ :Optional[int] = np.array(
[-0.67_72, -0.38_35, -1.24_56, 0.19_05, -1.09_74, 0.69_67, -1.93_53, 0.01_78, 1.01_67] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
elif step == 5:
assert latents.shape == (1, 4, 6_4, 6_4)
lowerCAmelCase__ :List[str] = latents[0, -3:, -3:, -1]
lowerCAmelCase__ :Union[str, Any] = np.array(
[-0.33_51, 0.22_41, -0.18_37, -0.23_25, -0.65_77, 0.33_93, -0.02_41, 0.58_99, 1.38_75] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
lowerCAmelCase__ :Any = False
lowerCAmelCase__ :Tuple = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , safety_checker=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = 'Andromeda galaxy in a bottle'
lowerCAmelCase__ :Tuple = np.random.RandomState(0 )
pipe(
prompt=__UpperCAmelCase , num_inference_steps=5 , guidance_scale=7.5 , generator=__UpperCAmelCase , callback=__UpperCAmelCase , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , safety_checker=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(__UpperCAmelCase , __UpperCAmelCase )
assert pipe.safety_checker is None
lowerCAmelCase__ :int = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :Tuple = OnnxStableDiffusionPipeline.from_pretrained(__UpperCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowerCAmelCase__ :Union[str, Any] = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
| 293 |
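# Minimal sketch of the ONNX Runtime path the tests above cover; note that the
# ONNX pipelines are numpy-based, so seeding uses np.random.RandomState rather
# than a torch generator. The checkpoint id follows the tests above.
import numpy as np
from diffusers import OnnxStableDiffusionPipeline

pipe = OnnxStableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", revision="onnx", provider="CPUExecutionProvider"
)
generator = np.random.RandomState(0)
image = pipe(
    "an astronaut", num_inference_steps=10, generator=generator, output_type="np"
).images[0]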
"""simple docstring"""
def __A (discount_rate , cash_flows ) ->float:
"""simple docstring"""
if discount_rate < 0:
raise ValueError('Discount rate cannot be negative' )
if not cash_flows:
raise ValueError('Cash flows list cannot be empty' )
present_value = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows ) )
return round(present_value , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 293 | 1 |
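# Worked example for the NPV helper above (kept under its obfuscated name __A):
# an initial outlay of 1000 followed by three inflows of 500 at a 10% rate.
# -1000 + 500/1.1 + 500/1.1**2 + 500/1.1**3 = 243.4259... -> rounded to 243.43
assert __A(0.1, [-1000, 500, 500, 500]) == 243.43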
"""simple docstring"""
class lowerCamelCase :
'''simple docstring'''
def __init__(self , array ):
"""simple docstring"""
len_array = len(array )
self.prefix_sum = [0] * len_array
if len_array > 0:
self.prefix_sum[0] = array[0]
for i in range(1 , len_array ):
self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
def get_sum(self , start , end ):
"""simple docstring"""
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def contains_sum(self , target_sum ):
"""simple docstring"""
sums = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(sum_item )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 166 |
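# Usage sketch for the prefix-sum class above (still under its obfuscated name
# lowerCamelCase): range sums are O(1) after the O(n) constructor, and
# contains_sum answers "is there a contiguous subarray with this sum?".
ps = lowerCamelCase([1, 2, 3])
assert ps.get_sum(0, 2) == 6   # 1 + 2 + 3
assert ps.get_sum(1, 2) == 5   # 2 + 3
assert ps.contains_sum(5)      # subarray [2, 3]
assert not ps.contains_sum(4)  # no contiguous subarray sums to 4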
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['image_processor', 'tokenizer']
SCREAMING_SNAKE_CASE = 'OwlViTImageProcessor'
SCREAMING_SNAKE_CASE = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__(self , _lowerCamelCase=None , _lowerCamelCase=None , **_lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , _lowerCamelCase , )
UpperCAmelCase__ : Optional[int] = kwargs.pop("""feature_extractor""" )
UpperCAmelCase__ : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(_lowerCamelCase , _lowerCamelCase )
def __call__(self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="max_length" , _lowerCamelCase="np" , **_lowerCamelCase ):
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
"""You have to specify at least one text or query image or image. All three cannot be none.""" )
if text is not None:
if isinstance(_lowerCamelCase , _lowerCamelCase ) or (isinstance(_lowerCamelCase , _lowerCamelCase ) and not isinstance(text[0] , _lowerCamelCase )):
UpperCAmelCase__ : Any = [self.tokenizer(_lowerCamelCase , padding=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase )]
elif isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(text[0] , _lowerCamelCase ):
UpperCAmelCase__ : Any = []
# Maximum number of queries across batch
UpperCAmelCase__ : int = max([len(_lowerCamelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(_lowerCamelCase ) != max_num_queries:
UpperCAmelCase__ : Optional[int] = t + [""" """] * (max_num_queries - len(_lowerCamelCase ))
UpperCAmelCase__ : Union[str, Any] = self.tokenizer(_lowerCamelCase , padding=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase )
encodings.append(_lowerCamelCase )
else:
raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
if return_tensors == "np":
UpperCAmelCase__ : Optional[Any] = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
UpperCAmelCase__ : Any = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
UpperCAmelCase__ : Any = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
UpperCAmelCase__ : List[str] = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
UpperCAmelCase__ : str = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 )
UpperCAmelCase__ : int = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
UpperCAmelCase__ : Any = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
UpperCAmelCase__ : Tuple = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
else:
raise ValueError("""Target return tensor type could not be returned""" )
UpperCAmelCase__ : Dict = BatchEncoding()
UpperCAmelCase__ : int = input_ids
UpperCAmelCase__ : Optional[int] = attention_mask
if query_images is not None:
UpperCAmelCase__ : int = BatchEncoding()
UpperCAmelCase__ : Optional[int] = self.image_processor(
_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase ).pixel_values
UpperCAmelCase__ : List[Any] = query_pixel_values
if images is not None:
UpperCAmelCase__ : Any = self.image_processor(_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase )
if text is not None and images is not None:
UpperCAmelCase__ : List[str] = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
UpperCAmelCase__ : int = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowerCamelCase ) , tensor_type=_lowerCamelCase )
def _a (self , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
return self.image_processor.post_process(*_lowerCamelCase , **_lowerCamelCase )
def _a (self , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
return self.image_processor.post_process_object_detection(*_lowerCamelCase , **_lowerCamelCase )
def _a (self , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
return self.image_processor.post_process_image_guided_detection(*_lowerCamelCase , **_lowerCamelCase )
def _a (self , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase )
def _a (self , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase )
@property
def _a (self ):
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _lowerCamelCase , )
return self.image_processor_class
@property
def _a (self ):
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , _lowerCamelCase , )
return self.image_processor
| 166 | 1 |
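# Hedged end-to-end sketch of the processor above driving zero-shot detection,
# assuming the public google/owlvit-base-patch32 checkpoint and a COCO sample image.
import requests
import torch
from PIL import Image
from transformers import OwlViTForObjectDetection, OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
target_sizes = torch.tensor([image.size[::-1]])  # (height, width)
results = processor.post_process_object_detection(outputs, threshold=0.1, target_sizes=target_sizes)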
import os
from collections.abc import Iterator
def A_ ( A__ = "." ) -> Iterator[str]:
for dir_path, dir_names, filenames in os.walk(A__ ):
a__ : Dict = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(A__ )[1] in (".py", ".ipynb"):
yield os.path.join(A__ , A__ ).lstrip('./' )
def A_ ( A__ ) -> List[str]:
return F'{i * " "}*' if i else "\n##"
def A_ ( A__ , A__ ) -> str:
a__ : Any = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(A__ ) or old_parts[i] != new_part) and new_part:
print(F'{md_prefix(A__ )} {new_part.replace("_" , " " ).title()}' )
return new_path
def A_ ( A__ = "." ) -> None:
a__ : Optional[Any] = ''
for filepath in sorted(good_file_paths(A__ ) ):
a__ , a__ : Union[str, Any] = os.path.split(A__ )
if filepath != old_path:
a__ : Tuple = print_path(A__ , A__ )
a__ : Optional[int] = (filepath.count(os.sep ) + 1) if filepath else 0
a__ : Any = F'{filepath}/{filename}'.replace(' ' , '%20' )
a__ : List[Any] = os.path.splitext(filename.replace('_' , ' ' ).title() )[0]
print(F'{md_prefix(A__ )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md(""".""")
| 99 |
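# Shapes the generator above produces: depth 0 opens a new "##" section, deeper
# entries become indented markdown bullets, e.g.
#   ## Sorts
#     * [Bubble Sort](sorts/bubble_sort.py)
assert md_prefix(0) == "\n##"
assert md_prefix(2) == "    *"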
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_nllb_moe"] = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTop2Router,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 99 | 1 |
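# The lazy-import pattern above in generic form: names are visible to type
# checkers immediately, but the heavy module import is deferred until first
# attribute access. "configuration_foo"/"FooConfig" are placeholders.
from typing import TYPE_CHECKING
from transformers.utils import _LazyModule

_import_structure = {"configuration_foo": ["FooConfig"]}
if TYPE_CHECKING:
    from .configuration_foo import FooConfig
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)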
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
SCREAMING_SNAKE_CASE_ : int = logging.get_logger(__name__) # pylint: disable=invalid-name
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: List[str] , UpperCamelCase: Optional[int] , UpperCamelCase: str=7_68 ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = proj_size
A__ = CLIPVisionModel(UpperCamelCase )
A__ = PaintByExampleMapper(UpperCamelCase )
A__ = nn.LayerNorm(config.hidden_size )
A__ = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
A__ = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: int , UpperCamelCase: Optional[Any]=False ):
"""simple docstring"""
A__ = self.model(pixel_values=UpperCamelCase )
A__ = clip_output.pooler_output
A__ = self.mapper(latent_states[:, None] )
A__ = self.final_layer_norm(UpperCamelCase )
A__ = self.proj_out(UpperCamelCase )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class a ( nn.Module ):
"""simple docstring"""
def __init__( self: Optional[int] , UpperCamelCase: int ):
"""simple docstring"""
super().__init__()
A__ = (config.num_hidden_layers + 1) // 5
A__ = config.hidden_size
A__ = 1
A__ = nn.ModuleList(
[
BasicTransformerBlock(UpperCamelCase , UpperCamelCase , UpperCamelCase , activation_fn="""gelu""" , attention_bias=UpperCamelCase )
for _ in range(UpperCamelCase )
] )
def UpperCamelCase ( self: Dict , UpperCamelCase: str ):
"""simple docstring"""
for block in self.blocks:
A__ = block(UpperCamelCase )
return hidden_states
| 69 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
SCREAMING_SNAKE_CASE_ : Tuple = logging.get_logger(__name__)
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: int , *UpperCamelCase: Optional[Any] , **UpperCamelCase: str ):
"""simple docstring"""
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" , UpperCamelCase , )
super().__init__(*UpperCamelCase , **UpperCamelCase )
| 69 | 1 |
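# The same deprecate-by-subclassing pattern in a self-contained form: the old
# name keeps working but warns on construction. Both class names are placeholders.
import warnings

class NewImageProcessor:  # stand-in for the replacement class
    def __init__(self, *args, **kwargs):
        pass

class OldImageProcessor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldImageProcessor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)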
'''simple docstring'''
def binary_exponentiation( a , n , mod ):
'''simple docstring'''
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(a , n - 1 , mod ) * a) % mod
else:
b = binary_exponentiation(a , n // 2 , mod )
return (b * b) % mod
# a prime number
p = 7_0_1
a = 1_0_0_0_0_0_0_0_0_0
b = 1_0
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 80 |
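# Sanity checks for the helper above: Python's three-argument pow computes the
# same values, including the Fermat inverse b ** (p - 2) mod p used in the prints.
assert binary_exponentiation(3, 13, 7) == pow(3, 13, 7) == 3  # 3**13 = 1594323
assert binary_exponentiation(10, 701 - 2, 701) == pow(10, 699, 701)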
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
__A = "path-to-your-trained-model"
__A = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda")
__A = "A photo of sks dog in a bucket"
__A = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| 148 | 0 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image( image_size , device ):
img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
raw_image = Image.open(requests.get(img_url , stream=True ).raw ).convert("RGB" )
transform = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73) , (0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11) ),
] )
image = transform(raw_image ).unsqueeze(0 ).to(device )
return image
def rename_key( key ):
if "visual_encoder" in key:
key = re.sub("visual_encoder*" , "vision_model.encoder" , key )
if "blocks" in key:
key = re.sub(r"blocks" , "layers" , key )
if "attn" in key:
key = re.sub(r"attn" , "self_attn" , key )
if "norm1" in key:
key = re.sub(r"norm1" , "layer_norm1" , key )
if "norm2" in key:
key = re.sub(r"norm2" , "layer_norm2" , key )
if "encoder.norm" in key:
key = re.sub(r"encoder.norm" , "post_layernorm" , key )
if "encoder.patch_embed.proj" in key:
key = re.sub(r"encoder.patch_embed.proj" , "embeddings.patch_embedding" , key )
if "encoder.pos_embed" in key:
key = re.sub(r"encoder.pos_embed" , "embeddings.position_embedding" , key )
if "encoder.cls_token" in key:
key = re.sub(r"encoder.cls_token" , "embeddings.class_embedding" , key )
if "self_attn" in key:
key = re.sub(r"self_attn.proj" , "self_attn.projection" , key )
return key
@torch.no_grad()
def convert_blip_checkpoint( pytorch_dump_folder_path , config_path=None ):
if config_path is not None:
config = BlipConfig.from_pretrained(config_path )
else:
config = BlipConfig(projection_dim=5_1_2 , text_config={} , vision_config={} )
hf_model = BlipForConditionalGeneration(config ).eval()
model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
pt_model = blip_decoder(pretrained=model_url , image_size=3_8_4 , vit="base" )
pt_model = pt_model.eval()
modified_state_dict = pt_model.state_dict()
for key in modified_state_dict.copy():
value = modified_state_dict.pop(key )
renamed_key = rename_key(key )
modified_state_dict[renamed_key] = value
hf_model.load_state_dict(modified_state_dict )
image_size = 3_8_4
image = load_demo_image(image_size=image_size , device="cpu" )
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased" )
input_ids = tokenizer(["a picture of"] ).input_ids
out = hf_model.generate(image , input_ids )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 3_8_6_1, 1_9_9_7, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
out = hf_model.generate(image )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(pytorch_dump_folder_path )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
model_url = (
"https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
)
vqa_model = blip_vqa(pretrained=model_url , image_size=image_size , vit="base" )
vqa_model.eval()
modified_state_dict = vqa_model.state_dict()
for key in modified_state_dict.copy():
value = modified_state_dict.pop(key )
renamed_key = rename_key(key )
modified_state_dict[renamed_key] = value
hf_vqa_model = BlipForQuestionAnswering(config )
hf_vqa_model.load_state_dict(modified_state_dict )
question = ["How many dogs are in this image?"]
question_input_ids = tokenizer(question , return_tensors="pt" ).input_ids
answer = hf_vqa_model.generate(question_input_ids , image )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa" )
model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
itm_model = blip_itm(pretrained=model_url , image_size=image_size , vit="base" )
itm_model.eval()
modified_state_dict = itm_model.state_dict()
for key in modified_state_dict.copy():
value = modified_state_dict.pop(key )
renamed_key = rename_key(key )
modified_state_dict[renamed_key] = value
hf_itm_model = BlipForImageTextRetrieval(config )
question = ["A picture of a woman with a dog sitting in a beach"]
question_input_ids = tokenizer(
question , return_tensors="pt" , padding="max_length" , truncation=True , max_length=3_5 , ).input_ids
hf_itm_model.load_state_dict(modified_state_dict )
hf_itm_model.eval()
out_itm = hf_itm_model(question_input_ids , image , use_itm_head=True )
out = hf_itm_model(question_input_ids , image , use_itm_head=False )
assert out[0].item() == 0.21_10_68_74_94_27_79_54
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_56_98_84_53_86_50_51_27
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
args = parser.parse_args()
convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 47 |
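# The state-dict conversion idiom above in isolation: pop every key, rewrite it
# with the regex mapper, and reinsert, leaving the dict keyed by the new names.
def convert_state_dict(state_dict):
    for key in list(state_dict):
        value = state_dict.pop(key)
        state_dict[rename_key(key)] = value
    return state_dict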
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
__A =True
except ImportError:
__A =False
__A =logging.get_logger(__name__) # pylint: disable=invalid-name
def lowerCamelCase_ ( args ):
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
@staticmethod
def register_subcommand( parser ):
add_new_model_parser = parser.add_parser("add-new-model" )
add_new_model_parser.add_argument("--testing" , action="store_true" , help="If in testing mode." )
add_new_model_parser.add_argument("--testing_file" , type=str , help="Configuration file on which to run." )
add_new_model_parser.add_argument(
"--path" , type=str , help="Path to cookiecutter. Should only be used for testing purposes." )
add_new_model_parser.set_defaults(func=lowerCamelCase_ )
def __init__( self , testing , testing_file , path=None , *args ):
self._testing = testing
self._testing_file = testing_file
self._path = path
def SCREAMING_SNAKE_CASE_( self ) -> str:
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead." )
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
lowerCamelCase_ = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
if len(lowercase ) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory." )
lowerCamelCase_ = (
Path(__file__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
lowerCamelCase_ = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
if not self._testing:
cookiecutter(str(path_to_cookiecutter ) )
else:
with open(self._testing_file , "r" ) as configuration_file:
lowerCamelCase_ = json.load(lowercase )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=lowercase , extra_context=lowercase , )
lowerCamelCase_ = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
# Retrieve configuration
with open(directory + "/configuration.json" , "r" ) as configuration_file:
lowerCamelCase_ = json.load(lowercase )
lowerCamelCase_ = configuration["lowercase_modelname"]
lowerCamelCase_ = configuration["generate_tensorflow_pytorch_and_flax"]
os.remove(f'{directory}/configuration.json' )
lowerCamelCase_ = "PyTorch" in generate_tensorflow_pytorch_and_flax
lowerCamelCase_ = "TensorFlow" in generate_tensorflow_pytorch_and_flax
lowerCamelCase_ = "Flax" in generate_tensorflow_pytorch_and_flax
lowerCamelCase_ = f'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(lowercase , exist_ok=lowercase )
os.makedirs(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}' , exist_ok=lowercase )
# Tests require submodules as they have parent imports
with open(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' , "w" ):
pass
shutil.move(
f'{directory}/__init__.py' , f'{model_dir}/__init__.py' , )
shutil.move(
f'{directory}/configuration_{lowercase_model_name}.py' , f'{model_dir}/configuration_{lowercase_model_name}.py' , )
def remove_copy_lines(lowercase ):
with open(lowercase , "r" ) as f:
lowerCamelCase_ = f.readlines()
with open(lowercase , "w" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(lowercase )
if output_pytorch:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_{lowercase_model_name}.py' , f'{model_dir}/modeling_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/test_modeling_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' , )
else:
os.remove(f'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_tf_{lowercase_model_name}.py' , f'{model_dir}/modeling_tf_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/test_modeling_tf_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' , )
else:
os.remove(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_flax_{lowercase_model_name}.py' , f'{model_dir}/modeling_flax_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/test_modeling_flax_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' , )
else:
os.remove(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/{lowercase_model_name}.md' , f'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' , )
shutil.move(
f'{directory}/tokenization_{lowercase_model_name}.py' , f'{model_dir}/tokenization_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/tokenization_fast_{lowercase_model_name}.py' , f'{model_dir}/tokenization_{lowercase_model_name}_fast.py' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(lowercase , lowercase , lowercase ):
# Create temp file
lowerCamelCase_ , lowerCamelCase_ = mkstemp()
lowerCamelCase_ = False
with fdopen(lowercase , "w" ) as new_file:
with open(lowercase ) as old_file:
for line in old_file:
new_file.write(lowercase )
if line_to_copy_below in line:
lowerCamelCase_ = True
for line_to_copy in lines_to_copy:
new_file.write(lowercase )
if not line_found:
raise ValueError(f'Line {line_to_copy_below} was not found in file.' )
# Copy the file permissions from the old file to the new file
copymode(lowercase , lowercase )
# Remove original file
remove(lowercase )
# Move new file
move(lowercase , lowercase )
def skip_units(lowercase ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(lowercase ):
with open(lowercase ) as datafile:
lowerCamelCase_ = []
lowerCamelCase_ = False
lowerCamelCase_ = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
lowerCamelCase_ = line.split("\"" )[1]
lowerCamelCase_ = skip_units(lowercase )
elif "# Below: " in line and "##" not in line:
lowerCamelCase_ = line.split("\"" )[1]
lowerCamelCase_ = skip_units(lowercase )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(lowercase , lowercase , lowercase )
lowerCamelCase_ = []
elif "# Replace with" in line and "##" not in line:
lowerCamelCase_ = []
elif "##" not in line:
lines_to_copy.append(lowercase )
remove(lowercase )
replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py' )
os.rmdir(lowercase )
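# `replace_in_files` above implements a small line-oriented protocol over the generated
# `to_replace_<model>.py` file: a `# To replace in: "<path>"` header selects the target
# file, `# Below: "<anchor>"` names the line to copy new content under, plain lines
# accumulate, and `# End.` flushes them through `replace()`. A hedged sketch of input
# this parser accepts; path, anchor, and payload are illustrative, not generated output:
#
# # To replace in: "src/transformers/models/auto/configuration_auto.py"
# # Below: "# Add new model configurations here"
# ("brand_new_model", "BrandNewModelConfig"),
# # End.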
| 47 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : str = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = """roberta"""
def __init__( self : Optional[int] , __lowerCamelCase : Optional[Any]=5_02_65 , __lowerCamelCase : Optional[Any]=7_68 , __lowerCamelCase : List[Any]=12 , __lowerCamelCase : Union[str, Any]=12 , __lowerCamelCase : int=30_72 , __lowerCamelCase : Any="gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : int=5_12 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : Tuple=0.02 , __lowerCamelCase : Union[str, Any]=1e-12 , __lowerCamelCase : Any=1 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Optional[int]="absolute" , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : int=None , **__lowerCamelCase : List[str] , ) -> Tuple:
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = hidden_act
a = intermediate_size
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = initializer_range
a = layer_norm_eps
a = position_embedding_type
a = use_cache
a = classifier_dropout
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a = {0: "batch", 1: "choice", 2: "sequence"}
else:
a = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
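# Usage sketch for the configuration above, through the upstream `transformers` class
# this obfuscated copy mirrors (assumes `transformers` is installed; the model is
# randomly initialized, so no checkpoint download is needed):
from transformers import RobertaConfig, RobertaModel

roberta_config = RobertaConfig(vocab_size=50265, hidden_size=768, num_hidden_layers=12)
roberta_model = RobertaModel(roberta_config)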
| 107 |
'''simple docstring'''
import math
def a__ ( lowercase : list, lowercase : int = 0, lowercase : int = 0 ) -> list:
"""simple docstring"""
_UpperCamelCase = end or len(lowercase )
for i in range(lowercase, lowercase ):
_UpperCamelCase = i
_UpperCamelCase = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
_UpperCamelCase = array[temp_index - 1]
temp_index -= 1
_UpperCamelCase = temp_index_value
return array
def a__ ( lowercase : list, lowercase : int, lowercase : int ) -> None: # Max Heap
"""simple docstring"""
_UpperCamelCase = index
_UpperCamelCase = 2 * index + 1 # Left Node
_UpperCamelCase = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
_UpperCamelCase = left_index
if right_index < heap_size and array[largest] < array[right_index]:
_UpperCamelCase = right_index
if largest != index:
array[index], array[largest] = array[largest], array[index]
heapify(lowercase, lowercase, lowercase )
def a__ ( lowercase : list ) -> list:
"""simple docstring"""
_UpperCamelCase = len(lowercase )
for i in range(n // 2, -1, -1 ):
heapify(lowercase, lowercase, lowercase )
for i in range(n - 1, 0, -1 ):
array[i], array[0] = array[0], array[i]
heapify(lowercase, 0, lowercase )
return array
def a__ ( lowercase : list, lowercase : int, lowercase : int, lowercase : int ) -> int:
"""simple docstring"""
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def a__ ( lowercase : list, lowercase : int, lowercase : int, lowercase : int ) -> int:
"""simple docstring"""
_UpperCamelCase = low
_UpperCamelCase = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
array[i], array[j] = array[j], array[i]
i += 1
def a__ ( lowercase : list ) -> list:
"""simple docstring"""
if len(lowercase ) == 0:
return array
_UpperCamelCase = 2 * math.ceil(math.log2(len(lowercase ) ) )
_UpperCamelCase = 16
return intro_sort(lowercase, 0, len(lowercase ), lowercase, lowercase )
def a__ ( lowercase : list, lowercase : int, lowercase : int, lowercase : int, lowercase : int ) -> list:
"""simple docstring"""
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(lowercase )
max_depth -= 1
_UpperCamelCase = median_of_a(lowercase, lowercase, start + ((end - start) // 2) + 1, end - 1 )
_UpperCamelCase = partition(lowercase, lowercase, lowercase, lowercase )
intro_sort(lowercase, lowercase, lowercase, lowercase, lowercase )
_UpperCamelCase = p
return insertion_sort(lowercase, lowercase, lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase__ : Any = input('Enter numbers separated by a comma : ').strip()
lowercase__ : Any = [float(item) for item in user_input.split(',')]
print(sort(unsorted))
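# A hedged sanity check for the introsort above, assuming the entry point keeps its
# conventional name `sort` (the obfuscated `a__` definitions shadow one another here):
#
# import random
# sample = random.sample(range(100), 20)
# assert sort(sample) == sorted(sample)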
| 324 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
lowerCAmelCase__ : List[Any] =None
lowerCAmelCase__ : Tuple =logging.get_logger(__name__)
lowerCAmelCase__ : Any ={'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase__ : List[str] ={
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase__ : Dict ={
'''facebook/nllb-large-en-ro''': 1024,
'''facebook/nllb-200-distilled-600M''': 1024,
}
# fmt: off
lowerCAmelCase__ : Optional[Any] =['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ : Dict = VOCAB_FILES_NAMES
UpperCamelCase__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : Optional[Any] = ['''input_ids''', '''attention_mask''']
UpperCamelCase__ : str = NllbTokenizer
UpperCamelCase__ : List[int] = []
UpperCamelCase__ : List[int] = []
def __init__( self , _A=None , _A=None , _A="<s>" , _A="</s>" , _A="</s>" , _A="<s>" , _A="<unk>" , _A="<pad>" , _A="<mask>" , _A=None , _A=None , _A=None , _A=False , **_A , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token
__SCREAMING_SNAKE_CASE = legacy_behaviour
super().__init__(
vocab_file=_A , tokenizer_file=_A , bos_token=_A , eos_token=_A , sep_token=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , src_lang=_A , tgt_lang=_A , additional_special_tokens=_A , legacy_behaviour=_A , **_A , )
__SCREAMING_SNAKE_CASE = vocab_file
__SCREAMING_SNAKE_CASE = False if not self.vocab_file else True
__SCREAMING_SNAKE_CASE = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
__SCREAMING_SNAKE_CASE = {
lang_code: self.convert_tokens_to_ids(_A ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__SCREAMING_SNAKE_CASE = src_lang if src_lang is not None else 'eng_Latn'
__SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(self._src_lang )
__SCREAMING_SNAKE_CASE = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _A ( self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _A ( self , _A , _A = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _A ( self , _A , _A = None ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [self.sep_token_id]
__SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _A ( self , _A , _A , _A , _A , **_A ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__SCREAMING_SNAKE_CASE = src_lang
__SCREAMING_SNAKE_CASE = self(_A , add_special_tokens=_A , return_tensors=_A , **_A )
__SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(_A )
__SCREAMING_SNAKE_CASE = tgt_lang_id
return inputs
def _A ( self , _A , _A = "eng_Latn" , _A = None , _A = "fra_Latn" , **_A , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = src_lang
__SCREAMING_SNAKE_CASE = tgt_lang
return super().prepare_seq2seq_batch(_A , _A , **_A )
def _A ( self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def _A ( self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(_A )
if self.legacy_behaviour:
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
else:
__SCREAMING_SNAKE_CASE = [self.cur_lang_code]
__SCREAMING_SNAKE_CASE = [self.eos_token_id]
__SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.prefix_tokens )
__SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.suffix_tokens )
__SCREAMING_SNAKE_CASE = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(_A )
if self.legacy_behaviour:
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
else:
__SCREAMING_SNAKE_CASE = [self.cur_lang_code]
__SCREAMING_SNAKE_CASE = [self.eos_token_id]
__SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.prefix_tokens )
__SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.suffix_tokens )
__SCREAMING_SNAKE_CASE = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _A ( self , _A , _A = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
__SCREAMING_SNAKE_CASE = os.path.join(
_A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ):
copyfile(self.vocab_file , _A )
return (out_vocab_file,)
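# Usage sketch via the upstream fast tokenizer this file defines; `src_lang` and
# `tgt_lang` take the FLORES-200 codes listed in FAIRSEQ_LANGUAGE_CODES above
# (assumes the checkpoint can be downloaded):
from transformers import AutoTokenizer

nllb_tok = AutoTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
encoded = nllb_tok("Hello world", return_tensors="pt")
# input_ids start with the eng_Latn code and end with </s> (order flips under legacy_behaviour)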
| 356 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
def __init__( self , _A , _A , _A = None , _A = None , _A = False , **_A , ):
'''simple docstring'''
super().__init__(features=_A , cache_dir=_A , keep_in_memory=_A , **_A )
__SCREAMING_SNAKE_CASE = Sql(
cache_dir=_A , features=_A , sql=_A , con=_A , **_A , )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
self.builder.download_and_prepare(
download_config=_A , download_mode=_A , verification_mode=_A , base_path=_A , )
# Build dataset for splits
__SCREAMING_SNAKE_CASE = self.builder.as_dataset(
split='train' , verification_mode=_A , in_memory=self.keep_in_memory )
return dataset
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , _A , _A , _A , _A = None , _A = None , **_A , ):
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
__SCREAMING_SNAKE_CASE = dataset
__SCREAMING_SNAKE_CASE = name
__SCREAMING_SNAKE_CASE = con
__SCREAMING_SNAKE_CASE = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__SCREAMING_SNAKE_CASE = num_proc
__SCREAMING_SNAKE_CASE = to_sql_kwargs
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.to_sql_kwargs.pop('sql' , _A )
__SCREAMING_SNAKE_CASE = self.to_sql_kwargs.pop('con' , _A )
__SCREAMING_SNAKE_CASE = self.to_sql_kwargs.pop('index' , _A )
__SCREAMING_SNAKE_CASE = self._write(index=_A , **self.to_sql_kwargs )
return written
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = args
__SCREAMING_SNAKE_CASE = {**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs
__SCREAMING_SNAKE_CASE = query_table(
table=self.dataset.data , key=slice(_A , offset + self.batch_size ) , indices=self.dataset._indices , )
__SCREAMING_SNAKE_CASE = batch.to_pandas()
__SCREAMING_SNAKE_CASE = df.to_sql(self.name , self.con , index=_A , **_A )
return num_rows or len(_A )
def _A ( self , _A , **_A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating SQL from Arrow format' , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _A , _A )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating SQL from Arrow format' , ):
written += num_rows
return written
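# Round-trip sketch for the reader/writer pair above, via the public `datasets` API
# that wraps them (`Dataset.to_sql` / `Dataset.from_sql`); an in-memory SQLite
# connection is assumed here, and a connection URI string also works:
import sqlite3

from datasets import Dataset

_con = sqlite3.connect(":memory:")
Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]}).to_sql("demo", _con)
_ds = Dataset.from_sql("SELECT * FROM demo", _con)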
| 118 | 0 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('''socket.socket''' )
@patch('''builtins.open''' )
def test_send_file_running_as_expected(file , sock) -> None:
    """simple docstring"""
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None] )
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f )
    # ===== invoke =====
    send_file(filename='''mytext.txt''' , testing=True )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
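# The test above patches `socket.socket` and `builtins.open`, so nothing touches the
# network or the filesystem. For orientation, a minimal sketch of the kind of
# `send_file` such assertions exercise; this is an assumption about the module under
# test, not its verbatim source:
import socket

def send_file_sketch(filename: str = "mytext.txt", testing: bool = False) -> None:
    sock = socket.socket()           # sock.assert_called_once()
    sock.bind(("localhost", 12312))
    sock.listen(5)
    conn, _addr = sock.accept()      # accept.assert_called_once()
    conn.recv(1024)                  # conn.recv.assert_called_once()
    with open(filename, "rb") as f:  # file.__enter__ / read assertions
        while data := f.read(1024):
            conn.send(data)          # conn.send.assert_called_once()
            if testing:
                break
    conn.close()
    sock.shutdown(socket.SHUT_RDWR)
    sock.close()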
| 5 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE_:List[Any] = TypeVar("""KEY""")
SCREAMING_SNAKE_CASE_:Dict = TypeVar("""VAL""")
@dataclass(frozen=SCREAMING_SNAKE_CASE__ , slots=SCREAMING_SNAKE_CASE__ )
class SCREAMING_SNAKE_CASE__ ( Generic[KEY, VAL] ):
'''simple docstring'''
__lowerCamelCase : KEY
__lowerCamelCase : VAL
class SCREAMING_SNAKE_CASE__ ( _Item ):
'''simple docstring'''
def __init__( self ):
    super().__init__(None , None )
def __bool__( self ):
return False
SCREAMING_SNAKE_CASE_:Optional[Any] = _DeletedItem()
class SCREAMING_SNAKE_CASE__ ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__( self, lowerCamelCase__ = 8, lowerCamelCase__ = 0.75 ):
A : List[str] = initial_block_size
A : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
A : Dict = capacity_factor
A : Optional[Any] = 0
def _lowerCAmelCase ( self, lowerCamelCase__ ):
return hash(lowerCamelCase__ ) % len(self._buckets )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
return (ind + 1) % len(self._buckets )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : List[Any] = self._buckets[ind]
if not stored:
A : Optional[int] = _Item(lowerCamelCase__, lowerCamelCase__ )
self._len += 1
return True
elif stored.key == key:
A : int = _Item(lowerCamelCase__, lowerCamelCase__ )
return True
else:
return False
def _lowerCAmelCase ( self ):
A : Union[str, Any] = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowerCamelCase__ )
def _lowerCAmelCase ( self ):
if len(self._buckets ) <= self._initial_block_size:
return False
A : Optional[Any] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Dict = self._buckets
A : Optional[Any] = [None] * new_size
A : List[str] = 0
for item in old_buckets:
if item:
self._add_item(item.key, item.val )
def _lowerCAmelCase ( self ):
self._resize(len(self._buckets ) * 2 )
def _lowerCAmelCase ( self ):
self._resize(len(self._buckets ) // 2 )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Union[str, Any] = self._get_bucket_index(lowerCamelCase__ )
for _ in range(len(self._buckets ) ):
yield ind
A : Dict = self._get_next_ind(lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ):
for ind in self._iterate_buckets(lowerCamelCase__ ):
if self._try_set(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
break
def __setitem__( self, lowerCamelCase__, lowerCamelCase__ ):
if self._is_full():
self._size_up()
self._add_item(lowerCamelCase__, lowerCamelCase__ )
def __delitem__( self, lowerCamelCase__ ):
for ind in self._iterate_buckets(lowerCamelCase__ ):
A : Tuple = self._buckets[ind]
if item is None:
raise KeyError(lowerCamelCase__ )
if item is _deleted:
continue
if item.key == key:
A : int = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self, lowerCamelCase__ ):
for ind in self._iterate_buckets(lowerCamelCase__ ):
A : int = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowerCamelCase__ )
def __len__( self ):
return self._len
def __iter__( self ):
yield from (item.key for item in self._buckets if item)
def __repr__( self ):
A : Union[str, Any] = """ ,""".join(
f'''{item.key}: {item.val}''' for item in self._buckets if item )
return f'''HashMap({val_string})'''
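# A hedged usage sketch for the open-addressing map above, assuming the public class
# (obfuscated to SCREAMING_SNAKE_CASE__ here) keeps its original name `HashMap`:
#
# hm = HashMap(initial_block_size=8)
# hm["a"] = 1
# hm["b"] = 2
# assert hm["a"] == 1 and len(hm) == 2
# del hm["a"]              # slot becomes the _deleted tombstone; map may resize down
# assert list(hm) == ["b"]  # iteration yields live keys only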
| 116 | 0 |
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel
if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 353 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
__snake_case :Tuple = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class _A ( __UpperCAmelCase ):
def __init__( self : Optional[int] , **__SCREAMING_SNAKE_CASE : Optional[Any]):
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE)
if self.framework == "tf":
raise ValueError(F'The {self.__class__} is only available in PyTorch.')
requires_backends(self , '''vision''')
self.check_model_type(__SCREAMING_SNAKE_CASE)
def __call__( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, "Image.Image", List[Dict[str, Any]]] , __SCREAMING_SNAKE_CASE : Union[str, List[str]] = None , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ):
'''simple docstring'''
if "text_queries" in kwargs:
__a = kwargs.pop('''text_queries''')
if isinstance(__SCREAMING_SNAKE_CASE , (str, Image.Image)):
__a = {'''image''': image, '''candidate_labels''': candidate_labels}
else:
__a = image
__a = super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
return results
    def _lowerCamelCase ( self : str , **kwargs : Union[str, Any]):
        '''simple docstring'''
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params['''threshold'''] = kwargs['''threshold''']
        if "top_k" in kwargs:
            postprocess_params['''top_k'''] = kwargs['''top_k''']
        return {}, {}, postprocess_params
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
__a = load_image(inputs['''image'''])
__a = inputs['''candidate_labels''']
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__a = candidate_labels.split(''',''')
__a = torch.tensor([[image.height, image.width]] , dtype=torch.int32)
for i, candidate_label in enumerate(__SCREAMING_SNAKE_CASE):
__a = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework)
__a = self.image_processor(__SCREAMING_SNAKE_CASE , return_tensors=self.framework)
yield {
"is_last": i == len(__SCREAMING_SNAKE_CASE) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
__a = model_inputs.pop('''target_size''')
__a = model_inputs.pop('''candidate_label''')
__a = model_inputs.pop('''is_last''')
__a = self.model(**__SCREAMING_SNAKE_CASE)
__a = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
return model_outputs
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=None):
'''simple docstring'''
__a = []
for model_output in model_outputs:
__a = model_output['''candidate_label''']
__a = BaseModelOutput(__SCREAMING_SNAKE_CASE)
__a = self.image_processor.post_process_object_detection(
outputs=__SCREAMING_SNAKE_CASE , threshold=__SCREAMING_SNAKE_CASE , target_sizes=model_output['''target_size'''])[0]
for index in outputs["scores"].nonzero():
__a = outputs['''scores'''][index].item()
__a = self._get_bounding_box(outputs['''boxes'''][index][0])
__a = {'''score''': score, '''label''': label, '''box''': box}
results.append(__SCREAMING_SNAKE_CASE)
__a = sorted(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE: x["score"] , reverse=__SCREAMING_SNAKE_CASE)
if top_k:
__a = results[:top_k]
return results
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : "torch.Tensor"):
'''simple docstring'''
if self.framework != "pt":
raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''')
xmin , ymin , xmax , ymax = box.int().tolist()
__a = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
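# Usage sketch through the public `transformers` entry point for the pipeline above;
# OWL-ViT is the usual checkpoint for this task (checkpoint download assumed possible):
from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
detections = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
# each entry: {"score": float, "label": str, "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}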
| 131 | 0 |
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
warnings.warn(
"Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
"be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead." , UpperCAmelCase__ , )
| 91 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = "▁"
lowercase__ = {"vocab_file": "spiece.model"}
lowercase__ = {
"vocab_file": {
"google/reformer-crime-and-punishment": (
"https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
)
}
}
lowercase__ = {
"google/reformer-crime-and-punishment": 524288,
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Dict = VOCAB_FILES_NAMES
UpperCAmelCase_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : Dict = ["""input_ids""", """attention_mask"""]
def __init__( self : str , lowercase_ : Dict , lowercase_ : Tuple="</s>" , lowercase_ : Dict="<unk>" , lowercase_ : Tuple=[] , lowercase_ : Optional[Dict[str, Any]] = None , **lowercase_ : List[str] , ) -> None:
UpperCAmelCase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowercase_ , unk_token=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
UpperCAmelCase : List[Any] = vocab_file
UpperCAmelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase_ )
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]:
return self.sp_model.get_piece_size()
def UpperCAmelCase_ ( self : List[str] ) -> Dict[str, int]:
UpperCAmelCase : int = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Any ) -> str:
UpperCAmelCase : Tuple = self.__dict__.copy()
UpperCAmelCase : Union[str, Any] = None
return state
def __setstate__( self : Optional[Any] , lowercase_ : Any ) -> List[str]:
UpperCAmelCase : Dict = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCAmelCase : Dict = {}
UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase_ ( self : List[Any] , lowercase_ : str ) -> List[str]:
return self.sp_model.encode(lowercase_ , out_type=lowercase_ )
def UpperCAmelCase_ ( self : int , lowercase_ : Tuple ) -> Optional[int]:
return self.sp_model.piece_to_id(lowercase_ )
def UpperCAmelCase_ ( self : List[str] , lowercase_ : Optional[int] ) -> List[str]:
if index < self.sp_model.get_piece_size():
UpperCAmelCase : Tuple = self.sp_model.IdToPiece(lowercase_ )
return token
def UpperCAmelCase_ ( self : List[str] , lowercase_ : Optional[int] ) -> Optional[int]:
UpperCAmelCase : Dict = []
UpperCAmelCase : int = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowercase_ ) + token
UpperCAmelCase : Any = []
else:
current_sub_tokens.append(lowercase_ )
out_string += self.sp_model.decode(lowercase_ )
return out_string.strip()
def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : str , lowercase_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowercase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase : int = os.path.join(
lowercase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase_ , 'wb' ) as fi:
UpperCAmelCase : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (out_vocab_file,)
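# Usage sketch via the upstream tokenizer class this file mirrors; the checkpoint name
# comes from the vocab map above (checkpoint download assumed possible):
from transformers import ReformerTokenizer

reformer_tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
token_ids = reformer_tok("A sentence to encode.").input_ids
text = reformer_tok.decode(token_ids)  # round-trips through the SentencePiece model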
| 151 | 0 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase__ :
def __init__( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str]=13 , UpperCamelCase__ : Dict=32 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : Optional[Any]=16 , UpperCamelCase__ : Tuple=[32, 64, 128] , UpperCamelCase__ : Tuple=[1, 2, 1] , UpperCamelCase__ : Optional[int]=[2, 2, 4] , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : str=2.0 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : Union[str, Any]=0.0 , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : Dict="gelu" , UpperCamelCase__ : int=False , UpperCamelCase__ : str=True , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : Optional[Any]=1E-5 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Any=None , UpperCamelCase__ : str=True , UpperCamelCase__ : int=10 , UpperCamelCase__ : Optional[int]=8 , UpperCamelCase__ : Optional[Any]=["stage1", "stage2"] , UpperCamelCase__ : Dict=[1, 2] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size
SCREAMING_SNAKE_CASE : Any = num_channels
SCREAMING_SNAKE_CASE : str = embed_dim
SCREAMING_SNAKE_CASE : Tuple = hidden_sizes
SCREAMING_SNAKE_CASE : List[str] = depths
SCREAMING_SNAKE_CASE : Optional[int] = num_heads
SCREAMING_SNAKE_CASE : List[Any] = window_size
SCREAMING_SNAKE_CASE : List[str] = mlp_ratio
SCREAMING_SNAKE_CASE : List[Any] = qkv_bias
SCREAMING_SNAKE_CASE : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = drop_path_rate
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Tuple = use_absolute_embeddings
SCREAMING_SNAKE_CASE : Optional[int] = patch_norm
SCREAMING_SNAKE_CASE : Tuple = layer_norm_eps
SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE : str = is_training
SCREAMING_SNAKE_CASE : int = scope
SCREAMING_SNAKE_CASE : str = use_labels
SCREAMING_SNAKE_CASE : str = type_sequence_label_size
SCREAMING_SNAKE_CASE : str = encoder_stride
SCREAMING_SNAKE_CASE : Dict = out_features
SCREAMING_SNAKE_CASE : Dict = out_indices
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : List[str] = self.get_config()
return config, pixel_values, labels
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def __A ( self : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = FocalNetModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
SCREAMING_SNAKE_CASE : Optional[int] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __A ( self : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = FocalNetBackbone(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(UpperCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : str = FocalNetBackbone(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : str = model(UpperCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __A ( self : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = FocalNetForMaskedImageModeling(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(UpperCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE : List[Any] = 1
SCREAMING_SNAKE_CASE : Optional[Any] = FocalNetForMaskedImageModeling(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : str = model(UpperCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __A ( self : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.type_sequence_label_size
SCREAMING_SNAKE_CASE : int = FocalNetForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE : List[str] = 1
SCREAMING_SNAKE_CASE : Tuple = FocalNetForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = config_and_inputs
SCREAMING_SNAKE_CASE : List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
UpperCamelCase_ = (
{"""feature-extraction""": FocalNetModel, """image-classification""": FocalNetForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = FocalNetModelTester(self )
SCREAMING_SNAKE_CASE : Union[str, Any] = ConfigTester(self , config_class=UpperCamelCase__ , embed_dim=37 , has_text_modality=UpperCamelCase__ )
def __A ( self : int ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __A ( self : List[str] ):
'''simple docstring'''
return
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCamelCase__ )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@unittest.skip(reason='''FocalNet does not use inputs_embeds''' )
def __A ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='''FocalNet does not use feedforward chunking''' )
def __A ( self : Optional[Any] ):
'''simple docstring'''
pass
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
SCREAMING_SNAKE_CASE : int = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
SCREAMING_SNAKE_CASE : Any = model_class(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : str = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def __A ( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : str = outputs.hidden_states
SCREAMING_SNAKE_CASE : int = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# FocalNet has a different seq_length
SCREAMING_SNAKE_CASE : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
SCREAMING_SNAKE_CASE : Tuple = outputs.reshaped_hidden_states
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = reshaped_hidden_states[0].shape
SCREAMING_SNAKE_CASE : Union[str, Any] = (
reshaped_hidden_states[0].view(UpperCamelCase__ , UpperCamelCase__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
SCREAMING_SNAKE_CASE : List[str] = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : List[Any] = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[Any] = 3
SCREAMING_SNAKE_CASE : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
SCREAMING_SNAKE_CASE : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE : List[str] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
SCREAMING_SNAKE_CASE : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
SCREAMING_SNAKE_CASE : Tuple = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Any = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )
@slow
def __A ( self : Dict ):
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[int] = FocalNetModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Any = _config_zero_init(UpperCamelCase__ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(config=UpperCamelCase__ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        # assertEqual, not assertTrue: assertTrue would treat the second argument as a message
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)


@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
| 258 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()
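# Minimal sketch (illustrative input, not from the original module): right-padding
# two label rows to length 4 with -1 yields [[1, 2, -1, -1], [3, -1, -1, -1]]:
# padding_tensor([[1, 2], [3]], -1, "right", 4)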
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
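# Quick sanity sketch (illustrative inputs): ASCII symbols hit the codepoint ranges
# above, while non-ASCII punctuation such as the CJK full stop falls through to the
# Unicode "P*" category check.
# is_punctuation("!") -> True; is_punctuation("。") -> True; is_punctuation("a") -> False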
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # conversion to tensors will fail if we have labels as they are not of the same length yet
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
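# Hedged usage sketch (hypothetical feature dicts; assumes a LUKE tokenizer instance):
# collator = DataCollatorForLukeTokenClassification(tokenizer=tokenizer)
# batch = collator([{"input_ids": [...], "entity_ids": [...], "labels": [...],
#                    "ner_tags": [...], "original_entity_spans": [...]}])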
| 258 | 1 |
"""simple docstring"""
from collections import deque
def tarjan(g):
    """
    Tarjan's algorithm for finding the strongly connected components of a
    directed graph given as an adjacency list.  Runs in O(V + E).
    """
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components


def create_graph(n, edges):
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
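    # A second, minimal check (hypothetical input, not from the original test): two
    # disjoint 2-cycles collapse into exactly two strongly connected components.
    g2 = create_graph(4, [(0, 1), (1, 0), (2, 3), (3, 2)])
    assert sorted(sorted(c) for c in tarjan(g2)) == [[0, 1], [2, 3]]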
| 247 |
"""simple docstring"""
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
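# Hedged usage sketch: the first call JIT-compiles the extension (a CUDA toolchain and
# the kernel sources must be present on disk); PyTorch caches the build afterwards.
# MSDA = load_cuda_kernels()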
| 247 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
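# Hedged usage sketch (illustrative values): the validation above only accepts
# {"type": "linear" | "dynamic", "factor": float > 1.0}.
# ok = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
# GPTNeoXConfig(rope_scaling={"type": "exotic", "factor": 2.0})  # raises ValueError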
| 360 |
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
def parse_bool(string):
    if string == "True":
        return True
    elif string == "False":
        return False
    else:
        raise ValueError(f"could not parse string as bool {string}")
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
args = parser.parse_args()
controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
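# Example invocation (hypothetical paths, shown for orientation only):
#   python convert_original_controlnet_to_diffusers.py \
#       --checkpoint_path ./control_sd15_canny.pth \
#       --original_config_file ./cldm_v15.yaml \
#       --dump_path ./controlnet-canny --to_safetensors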
| 341 | 0 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
"gwf-440k": {
"url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
"sample_rate": 4_8000,
"sample_size": 6_5536,
},
"jmann-small-190k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
"sample_rate": 4_8000,
"sample_size": 6_5536,
},
"jmann-large-580k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
"sample_rate": 4_8000,
"sample_size": 13_1072,
},
"maestro-uncond-150k": {
"url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
"sample_rate": 1_6000,
"sample_size": 6_5536,
},
"unlocked-uncond-250k": {
"url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
"sample_rate": 1_6000,
"sample_size": 6_5536,
},
"honk-140k": {
"url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
"sample_rate": 1_6000,
"sample_size": 6_5536,
},
}
def alpha_sigma_to_t(alpha, sigma):
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
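# Sanity sketch: the crash schedule fixes both endpoints, since t=0 gives sigma=0
# (pure signal, atan2(0, 1) == 0) and t=1 gives sigma=1 (pure noise, atan2(1, 0) == pi/2).
# get_crash_schedule(torch.tensor(0.0)) -> 0.0; get_crash_schedule(torch.tensor(1.0)) -> 1.0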
class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")

    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
UP_NUM_TO_LAYER = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
MID_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
RES_CONV_MAP = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
ATTN_MAP = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])
def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"
    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
args = parser.parse_args()
main(args)
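# Example invocation (hypothetical paths):
#   python convert_dance_diffusion_to_diffusers.py --model_path gwf-440k \
#       --checkpoint_path ./gwf-440k-diffusers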
| 170 |
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]


class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
| 139 | 0 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")


@require_sentencepiece
@require_tokenizers
class SpeechTaTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechTaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)

    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            # fmt: off
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'],
            # fmt: on
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            # fmt: off
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'],
            # fmt: on
        )
    @slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
"""Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """
"""general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """
"""Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """
"""models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""",
"""BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """
"""conditioning on both left and right context in all layers.""",
"""The quick brown fox jumps over the lazy dog.""",
]
# fmt: off
        expected_encoding = {
"""input_ids""": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
| 223 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
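# Hedged usage sketch: the defaults above mirror studio-ousia/luke-base; entity
# embeddings stay at the smaller entity_emb_size (256) and are projected up to
# hidden_size (768) inside the model.
# config = LukeConfig(entity_vocab_size=10000, use_entity_aware_attention=False)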
| 223 | 1 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
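# Hedged sketch (illustrative YAML): a duplicated key now fails loudly instead of
# being silently overwritten by PyYAML's default mapping constructor.
# yaml.load("a: 1\na: 2\n", Loader=_NoDuplicateSafeLoader)  # TypeError: Got duplicate yaml keys: ['a']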
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])

    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
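# Hedged round-trip sketch (illustrative YAML): dashed YAML keys listed in
# _FIELDS_WITH_DASHES come back as underscore fields.
# md = DatasetMetadata.from_yaml_string("license: mit\ntrain-eval-index: []\n")
# assert "train_eval_index" in md and "---" in md._to_readme()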
known_task_ids = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
ap.add_argument("readme_filepath")
args = ap.parse_args()
readme_filepath = Path(args.readme_filepath)
dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 103 |
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 103 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 222 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_megatron_bert"] = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 222 | 1 |
"""simple docstring"""
from __future__ import annotations
class IIRFilter:
    """N-order IIR filter, working on float samples normalized on [-1, 1]."""

    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )

        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
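# Minimal usage sketch (illustrative first-order low-pass: y[n] = 0.5*x[n] + 0.5*y[n-1],
# i.e. a_coeffs = [1.0, -0.5], b_coeffs = [0.5, 0.0]).
if __name__ == "__main__":
    filt = IIRFilter(1)
    filt.set_coefficients([1.0, -0.5], [0.5, 0.0])
    assert filt.process(0.0) == 0.0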
| 221 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 221 | 1 |
"""simple docstring"""
ENERGY_CONVERSION = {
'''joule''': 1.0,
'''kilojoule''': 1_0_0_0,
'''megajoule''': 1_0_0_0_0_0_0,
'''gigajoule''': 1_0_0_0_0_0_0_0_0_0,
'''wattsecond''': 1.0,
'''watthour''': 3_6_0_0,
'''kilowatthour''': 3_6_0_0_0_0_0,
'''newtonmeter''': 1.0,
'''calorie_nutr''': 4_1_8_6.8,
'''kilocalorie_nutr''': 4_1_8_6_8_0_0.0_0,
'''electronvolt''': 1.602176634E-19,
'''britishthermalunit_it''': 1_0_5_5.0_5_5_8_5,
'''footpound''': 1.355818,
}
def energy_conversion( from_type , to_type , value ):
    '''simple docstring'''
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            F'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'''
            F'''Valid values are: {', '.join(ENERGY_CONVERSION )}'''
        )
        raise ValueError(msg )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
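    # Hedged worked examples of the conversion above (values illustrative but
    # arithmetically consistent with the table): 1 kWh = 3_600_000 J, 1_000 J = 1 kJ.
    assert energy_conversion("kilowatthour", "joule", 1) == 3_600_000.0
    assert energy_conversion("joule", "kilojoule", 1_000) == 1.0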
| 358 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    '''simple docstring'''
    parser = ArgumentParser("""Accelerate CLI tool""" , usage="""accelerate <command> [<args>]""" , allow_abbrev=False )
    subparsers = parser.add_subparsers(help="""accelerate command helpers""" )
    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , """func""" ):
        parser.print_help()
        exit(1 )
    # Run
    args.func(args )
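# A minimal, hedged sketch of the registration pattern the imported `*_command_parser`
# helpers presumably follow: each adds a subparser and binds its handler via
# `set_defaults(func=...)`, which is what makes `args.func(args )` above dispatch.
# The subcommand name and handler below are illustrative only.
def _example_command_parser(subparsers=None ):
    parser = subparsers.add_parser("""example""" , help="""illustrative subcommand""" )
    parser.add_argument("""--name""" , default="""world""" )
    parser.set_defaults(func=lambda args: print(F"""hello {args.name}""" ) )
    return parser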
if __name__ == "__main__":
main()
| 248 | 0 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class snake_case__ ( DiffusionPipeline ):
"""simple docstring"""
    def __init__( self : Tuple, speech_model : WhisperForConditionalGeneration, speech_processor : WhisperProcessor, vae : AutoencoderKL, text_encoder : CLIPTextModel, tokenizer : CLIPTokenizer, unet : UNet2DConditionModel, scheduler : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker : StableDiffusionSafetyChecker, feature_extractor : CLIPImageProcessor, ) ->None:
super().__init__()
if safety_checker is None:
logger.warning(
F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
        self.register_modules(
            speech_model=speech_model, speech_processor=speech_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, )
def lowercase_ ( self : List[str], _snake_case : Optional[Union[str, int]] = "auto" ) ->Union[str, Any]:
if slice_size == "auto":
snake_case__ : Any = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_snake_case )
def lowercase_ ( self : Any ) ->Any:
self.enable_attention_slicing(_snake_case )
@torch.no_grad()
    def __call__( self : List[Any], audio : str, sampling_rate : int=1_6_0_0_0, height : int = 5_1_2, width : int = 5_1_2, num_inference_steps : int = 5_0, guidance_scale : float = 7.5, negative_prompt : Optional[Union[str, List[str]]] = None, num_images_per_prompt : Optional[int] = 1, eta : float = 0.0, generator : Optional[torch.Generator] = None, latents : Optional[torch.FloatTensor] = None, output_type : Optional[str] = "pil", return_dict : bool = True, callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps : int = 1, **kwargs : Any, ) ->StableDiffusionPipelineOutput:
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors='pt', sampling_rate=sampling_rate ).input_features.to(self.device )
        predicted_ids = self.speech_model.generate(inputs, max_length=4_8_0_0_0_0 )
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True )[
            0
        ]
        if isinstance(prompt, str ):
            batch_size = 1
        elif isinstance(prompt, list ):
            batch_size = len(prompt )
        else:
            raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(prompt )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                F''' {type(callback_steps )}.''' )
# get prompt text embeddings
snake_case__ : Dict = self.tokenizer(
_snake_case, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt', )
snake_case__ : Dict = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
snake_case__ : List[Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
snake_case__ : Union[str, Any] = text_input_ids[:, : self.tokenizer.model_max_length]
snake_case__ : Optional[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
snake_case__ , snake_case__ , snake_case__ : Optional[int] = text_embeddings.shape
snake_case__ : Optional[int] = text_embeddings.repeat(1, _snake_case, 1 )
snake_case__ : str = text_embeddings.view(bs_embed * num_images_per_prompt, _snake_case, -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
snake_case__ : Optional[Any] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
snake_case__ : List[str]
if negative_prompt is None:
snake_case__ : Optional[Any] = [''] * batch_size
            elif type(prompt ) is not type(negative_prompt ):
                raise TypeError(
                    F'''`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt )} !='''
                    F''' {type(prompt )}.''' )
elif isinstance(_snake_case, _snake_case ):
snake_case__ : int = [negative_prompt]
elif batch_size != len(_snake_case ):
raise ValueError(
F'''`negative_prompt`: {negative_prompt} has batch size {len(_snake_case )}, but `prompt`:'''
F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
' the batch size of `prompt`.' )
else:
snake_case__ : Tuple = negative_prompt
snake_case__ : Dict = text_input_ids.shape[-1]
snake_case__ : int = self.tokenizer(
_snake_case, padding='max_length', max_length=_snake_case, truncation=_snake_case, return_tensors='pt', )
snake_case__ : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
snake_case__ : List[Any] = uncond_embeddings.shape[1]
snake_case__ : str = uncond_embeddings.repeat(1, _snake_case, 1 )
snake_case__ : Union[str, Any] = uncond_embeddings.view(batch_size * num_images_per_prompt, _snake_case, -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case__ : Tuple = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
snake_case__ : str = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
snake_case__ : str = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
snake_case__ : Dict = torch.randn(_snake_case, generator=_snake_case, device='cpu', dtype=_snake_case ).to(
self.device )
else:
snake_case__ : str = torch.randn(_snake_case, generator=_snake_case, device=self.device, dtype=_snake_case )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
snake_case__ : int = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_snake_case )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
snake_case__ : Any = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
snake_case__ : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
snake_case__ : List[str] = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
snake_case__ : Any = {}
if accepts_eta:
snake_case__ : Optional[int] = eta
for i, t in enumerate(self.progress_bar(_snake_case ) ):
# expand the latents if we are doing classifier free guidance
snake_case__ : List[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case__ : Tuple = self.scheduler.scale_model_input(_snake_case, _snake_case )
# predict the noise residual
snake_case__ : str = self.unet(_snake_case, _snake_case, encoder_hidden_states=_snake_case ).sample
# perform guidance
if do_classifier_free_guidance:
snake_case__ , snake_case__ : List[str] = noise_pred.chunk(2 )
snake_case__ : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
snake_case__ : Union[str, Any] = self.scheduler.step(_snake_case, _snake_case, _snake_case, **_snake_case ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_snake_case, _snake_case, _snake_case )
snake_case__ : Optional[int] = 1 / 0.1_8_2_1_5 * latents
snake_case__ : Tuple = self.vae.decode(_snake_case ).sample
snake_case__ : Tuple = (image / 2 + 0.5).clamp(0, 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
snake_case__ : int = image.cpu().permute(0, 2, 3, 1 ).float().numpy()
if output_type == "pil":
snake_case__ : str = self.numpy_to_pil(_snake_case )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=_snake_case, nsfw_content_detected=_snake_case )
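# A hedged numeric sketch of the classifier-free guidance step used in the loop above:
# guided = uncond + scale * (text - uncond). Names and values here are illustrative only.
if __name__ == "__main__":
    _uncond, _text, _scale = torch.tensor(0.2 ), torch.tensor(0.8 ), 7.5
    _guided = _uncond + _scale * (_text - _uncond)
    assert torch.isclose(_guided , torch.tensor(4.7 ) )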
| 277 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
a_ :Any = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
a_ :List[str] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
a_ :List[str] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ ( datasets.Metric ):
"""simple docstring"""
    def _info( self ) ->MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string', id='token' ), id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string', id='token' ), id='sequence' ), id='references' ),
} ), )
    def _compute( self, predictions : List[List[List[str]]], references : List[List[str]], min_len : int = 1, max_len : int = 4, ) ->Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len )
        }
| 277 | 1 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation( graph : dict , v : str , visited_forward : set , visited_backward : set , cst_fwd : dict , cst_bwd : dict , queue : PriorityQueue , parent : dict , shortest_distance : float | int , ):
    '''simple docstring'''
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt , np.inf )
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt) )
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij( source : str , destination : str , graph_forward : dict , graph_backward : dict ):
    '''simple docstring'''
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source) )
    queue_backward.put((0, destination) )
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd )
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd )
        shortest_distance = pass_and_relaxation(
            graph_forward , v_fwd , visited_forward , visited_backward , cst_fwd , cst_bwd , queue_forward , parent_forward , shortest_distance , )
        shortest_distance = pass_and_relaxation(
            graph_backward , v_bwd , visited_backward , visited_forward , cst_bwd , cst_fwd , queue_backward , parent_backward , shortest_distance , )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
graph_bwd = {
"B": [["E", 1]],
"C": [["B", 1]],
"D": [["C", 1]],
"F": [["D", 1], ["G", 1]],
"E": [[None, np.inf]],
"G": [["E", 2]],
}
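# A hedged usage sketch with the graphs defined above: the shortest E -> F route is
# E -> G -> F with total weight 2 + 1 = 3, so the call below should return 3.
if __name__ == "__main__":
    assert bidirectional_dij("E" , "F" , graph_fwd , graph_bwd ) == 3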
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 354 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
"tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase :List[Any] = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLxmertForPreTraining",
"TFLxmertMainLayer",
"TFLxmertModel",
"TFLxmertPreTrainedModel",
"TFLxmertVisualFeatureEncoder",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 240 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimesformerModel',
'TimesformerForVideoClassification',
'TimesformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 346 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase_ = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 58 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase_ = logging.get_logger(__name__)
def make_batched( videos ) -> List[List[ImageInput]]:
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(F'''Could not make batched video from {videos}''' )
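# A hedged usage sketch of make_batched above (dummy data, shapes illustrative):
# a single frame is wrapped to [[frame]], a clip to [clip], a batch passes through.
if __name__ == "__main__":
    _frame = np.zeros((2_2_4, 2_2_4, 3) , dtype=np.uint8 )
    _batched = make_batched(_frame )
    assert len(_batched ) == 1 and len(_batched[0] ) == 1  # one video containing one frame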
class VideoMAEImageProcessor( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names: List[str] = ['''pixel_values''']
    def __init__( self , do_resize: bool = True , size: Dict[str, int] = None , resample: PILImageResampling = PILImageResampling.BILINEAR , do_center_crop: bool = True , crop_size: Dict[str, int] = None , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 2_5_5 , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {'shortest_edge': 2_2_4}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
        crop_size = get_size_dict(crop_size , param_name='crop_size' )
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BILINEAR , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image , size['shortest_edge'] , default_to_square=False )
        elif "height" in size and "width" in size:
            output_size = (size['height'], size['width'])
        else:
            raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
        return center_crop(image , size=(size['height'], size['width']) , data_format=data_format , **kwargs )
    def rescale( self , image: np.ndarray , scale: Union[int, float] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def _preprocess_image( self , image: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_center_crop: bool = None , crop_size: Dict[str, int] = None , do_rescale: bool = None , rescale_factor: float = None , do_normalize: bool = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , data_format: Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
        '''simple docstring'''
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        image = to_numpy_array(image )
        if do_resize:
            image = self.resize(image=image , size=size , resample=resample )
        if do_center_crop:
            image = self.center_crop(image , size=crop_size )
        if do_rescale:
            image = self.rescale(image=image , scale=rescale_factor )
        if do_normalize:
            image = self.normalize(image=image , mean=image_mean , std=image_std )
        image = to_channel_dimension_format(image , data_format )
        return image
    def preprocess( self , videos: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_center_crop: bool = None , crop_size: Dict[str, int] = None , do_rescale: bool = None , rescale_factor: float = None , do_normalize: bool = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: ChannelDimension = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='crop_size' )
        if not valid_images(videos ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        videos = make_batched(videos )
        videos = [
            [
                self._preprocess_image(
                    image=img , do_resize=do_resize , size=size , resample=resample , do_center_crop=do_center_crop , crop_size=crop_size , do_rescale=do_rescale , rescale_factor=rescale_factor , do_normalize=do_normalize , image_mean=image_mean , image_std=image_std , data_format=data_format , )
                for img in video
            ]
            for video in videos
        ]
        data = {'pixel_values': videos}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 371 |
def binary_multiply( a , b ) -> int:
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
def binary_mod_multiply( a , b , c ) -> int:
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
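# A hedged worked example of the Russian-peasant multiplication above:
# 13 * 5 -> 5 is 0b101, so res = 13 (bit 0) + 52 (bit 2) = 65.
if __name__ == "__main__":
    assert binary_multiply(13 , 5 ) == 65
    assert binary_mod_multiply(13 , 5 , 7 ) == 65 % 7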
| 14 | 0 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_a = abspath(join(dirname(dirname(dirname(__file__))), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption( parser ) -> None:
    """simple docstring"""
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary( terminalreporter ) -> None:
    """simple docstring"""
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
| 194 |
"""simple docstring"""
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result( ) -> None:
    """simple docstring"""
    num_nodes = 9
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    result = kruskal(num_nodes , edges )
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    assert sorted(expected ) == sorted(result )
| 194 | 1 |
"""simple docstring"""
import functools
from typing import Any
def word_break( string: str , words: list[str] ) -> bool:
    '''simple docstring'''
    if not isinstance(string , str ) or len(string ) == 0:
        raise ValueError("the string should be not empty string" )
    if not isinstance(words , list ) or not all(
        isinstance(item , str ) and len(item ) > 0 for item in words ):
        raise ValueError("the words should be a list of non-empty strings" )
    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True
    len_string = len(string )
    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int ) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index , len_string ):
            trie_node = trie_node.get(string[i] , None )
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key , False ) and is_breakable(i + 1 ):
                return True
        return False
    return is_breakable(0 )
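# A hedged usage example of word_break above (inputs illustrative):
# "applepenapple" splits as "apple" + "pen" + "apple", so the call returns True.
if __name__ == "__main__":
    assert word_break("applepenapple" , ["apple", "pen"] ) is True
    assert word_break("catsandog" , ["cats", "dog", "sand", "and", "cat"] ) is False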
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 64 |
"""simple docstring"""
def solution( n: int = 100 ) -> int:
    '''simple docstring'''
    collect_powers = set()
    current_pow = 0
    upper_limit = n + 1  # maximum limit
    for a in range(2 , upper_limit ):
        for b in range(2 , upper_limit ):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow )  # adds the result to the set
    return len(collect_powers )
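# A hedged worked example: for n = 5 there are 4 * 4 = 16 pairs (a, b) with
# 2 <= a, b <= 5, and 2**4 == 4**2 == 16 is counted once, giving 15 distinct terms.
if __name__ == "__main__":
    assert solution(5 ) == 15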
if __name__ == "__main__":
    print('''Number of terms ''', solution(int(str(input()).strip())))
| 64 | 1 |
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device( module , tensor_name , device , value=None , fpaa_statistics=None ) ->None:
    """simple docstring"""
    if "." in tensor_name:
        splits = tensor_name.split('''.''' )
        for split in splits[:-1]:
            new_module = getattr(module , split )
            if new_module is None:
                raise ValueError(f"""{module} has no attribute {split}.""" )
            module = new_module
        tensor_name = splits[-1]
    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"""{module} does not have a parameter or a buffer named {tensor_name}.""" )
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module , tensor_name )
if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
raise ValueError(f"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" )
    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Params4bit )
        is_8bit = isinstance(module._parameters[tensor_name] , bnb.nn.Int8Params )
    if is_4bit or is_8bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device )
            elif isinstance(value , torch.Tensor ):
                new_value = value.to('''cpu''' )
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
                        '''0.37.2''' )
                    if not is_8bit_serializable:
                        raise ValueError(
                            '''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
                            '''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
            else:
                new_value = torch.tensor(value , device='''cpu''' )
            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls , Conv1D ) and fpaa_statistics is None:
                new_value = new_value.T
            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value , requires_grad=False , **kwargs ).to(device )
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value , requires_grad=False , **kwargs ).to(device )
            module._parameters[tensor_name] = new_value
            if fpaa_statistics is not None:
                setattr(module.weight , '''SCB''' , fpaa_statistics.to(device ) )
    else:
        if value is None:
            new_value = old_value.to(device )
        elif isinstance(value , torch.Tensor ):
            new_value = value.to(device )
        else:
            new_value = torch.tensor(value , device=device )
        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value , requires_grad=old_value.requires_grad )
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear( model , modules_to_not_convert=None , current_key_name=None , quantization_config=None , has_been_replaced=False ) ->List[Any]:
    """simple docstring"""
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if (isinstance(module , nn.Linear ) or isinstance(module , Conv1D )) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in '''.'''.join(current_key_name ) for key in modules_to_not_convert ):
                with init_empty_weights():
                    if isinstance(module , Conv1D ):
                        in_features , out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features
                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features , out_features , module.bias is not None , has_fp16_weights=quantization_config.llm_int8_has_fp16_weight , threshold=quantization_config.llm_int8_threshold , )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features , out_features , module.bias is not None , quantization_config.bnb_4bit_compute_dtype , compress_statistics=quantization_config.bnb_4bit_use_double_quant , quant_type=quantization_config.bnb_4bit_quant_type , )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module )
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False )
        if len(list(module.children() ) ) > 0:
            _ , has_been_replaced = _replace_with_bnb_linear(
                module , modules_to_not_convert , current_key_name , quantization_config , has_been_replaced=has_been_replaced , )
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def replace_with_bnb_linear( model , modules_to_not_convert=None , current_key_name=None , quantization_config=None ) ->int:
    """simple docstring"""
    modules_to_not_convert = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
    model , has_been_replaced = _replace_with_bnb_linear(
        model , modules_to_not_convert , current_key_name , quantization_config )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def replace_8bit_linear( *args , **kwargs ) ->List[Any]:
    """simple docstring"""
    warnings.warn(
        '''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , FutureWarning , )
    return replace_with_bnb_linear(*args , **kwargs )
def set_module_8bit_tensor_to_device( *args , **kwargs ) ->List[Any]:
    """simple docstring"""
    warnings.warn(
        '''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , FutureWarning , )
    return set_module_quantized_tensor_to_device(*args , **kwargs )
def get_keys_to_not_convert( model ) ->Optional[int]:
    """simple docstring"""
    tied_model = deepcopy(model ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = not hasattr(model , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = ['''.weight''', '''.bias''']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , '''''' )
        filtered_module_names.append(name )
    return filtered_module_names
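# A hedged usage sketch of the helpers above (the model object and `bnb_config` are
# illustrative; a transformers `BitsAndBytesConfig` would normally play the role of
# `bnb_config`, which is assumed here and not shown):
def _example_quantize(model , bnb_config ):
    keys_to_skip = get_keys_to_not_convert(model )  # typically something like ["lm_head"]
    return replace_with_bnb_linear(model , modules_to_not_convert=keys_to_skip , quantization_config=bnb_config )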
| 102 |
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
'''simple docstring'''
    model_name_or_path: Optional[str] = field(
        default=None, metadata={
            'help': (
                'The model checkpoint for weights initialization. Leave None if you want to train a model from'
                ' scratch.'
            )
        }, )
    model_type: Optional[str] = field(
        default=None, metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES )}, )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
@dataclass
class DataTrainingArguments:
'''simple docstring'''
    train_data_file: Optional[str] = field(
        default=None, metadata={'help': 'The input training data file (a text file).'} )
    train_data_files: Optional[str] = field(
        default=None, metadata={
            'help': (
                'The input training data files (multiple files in glob format). '
                'Very often splitting large files to smaller files can prevent tokenizer going out of memory'
            )
        }, )
    eval_data_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'}, )
    train_ref_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'}, )
    eval_ref_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'}, )
    line_by_line: bool = field(
        default=False, metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'}, )
    mlm: bool = field(
        default=False, metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} )
    whole_word_mask: bool = field(default=False, metadata={'help': 'Whether ot not to use whole word mask.'} )
    mlm_probability: float = field(
        default=0.1_5, metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
    plm_probability: float = field(
        default=1 / 6, metadata={
            'help': (
                'Ratio of length of a span of masked tokens to surrounding context length for permutation language'
                ' modeling.'
            )
        }, )
    max_span_length: int = field(
        default=5, metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} )
    block_size: int = field(
        default=-1, metadata={
            'help': (
                'Optional input sequence length after tokenization.'
                'The training dataset will be truncated in block of this size for training.'
                'Default to the model max input length for single sentence inputs (take into account special tokens).'
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def get_dataset( args: DataTrainingArguments , tokenizer: PreTrainedTokenizer , evaluate: bool = False , cache_dir: Optional[str] = None , ) ->Any:
    """simple docstring"""
    def _dataset(file_path: str , ref_path: str=None ):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('''You need to set world whole masking and mlm to True for Chinese Whole Word Mask''' )
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , ref_path=ref_path , )
            return LineByLineTextDataset(tokenizer=tokenizer , file_path=file_path , block_size=args.block_size )
        else:
            return TextDataset(
                tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=cache_dir , )
    if evaluate:
        return _dataset(args.eval_data_file , args.eval_ref_file )
    elif args.train_data_files:
        return ConcatDataset([_dataset(f ) for f in glob(args.train_data_files )] )
    else:
        return _dataset(args.train_data_file , args.train_ref_file )
def main() ->List[Any]:
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , _snake_case )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
            ''' script, save it, and load it from here, using --tokenizer_name''' )
if model_args.model_name_or_path:
__snake_case : int = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , )
else:
logger.info('''Training new model from scratch''' )
__snake_case : List[Any] = AutoModelWithLMHead.from_config(_snake_case )
model.resize_token_embeddings(len(_snake_case ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
            ''' --mlm flag (masked language modeling).''' )
if data_args.block_size <= 0:
__snake_case : List[str] = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
__snake_case : Optional[int] = min(data_args.block_size , tokenizer.max_len )
# Get datasets
__snake_case : Optional[Any] = (
get_dataset(_snake_case , tokenizer=_snake_case , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
__snake_case : Any = (
get_dataset(_snake_case , tokenizer=_snake_case , evaluate=_snake_case , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
__snake_case : List[Any] = DataCollatorForPermutationLanguageModeling(
tokenizer=_snake_case , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
__snake_case : Optional[Any] = DataCollatorForWholeWordMask(
tokenizer=_snake_case , mlm_probability=data_args.mlm_probability )
else:
__snake_case : Union[str, Any] = DataCollatorForLanguageModeling(
tokenizer=_snake_case , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__snake_case : Optional[int] = Trainer(
model=_snake_case , args=_snake_case , data_collator=_snake_case , train_dataset=_snake_case , eval_dataset=_snake_case , prediction_loss_only=_snake_case , )
# Training
if training_args.do_train:
__snake_case : Dict = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=_snake_case )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__snake_case : int = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__snake_case : Dict = trainer.evaluate()
__snake_case : Dict = math.exp(eval_output['''eval_loss'''] )
__snake_case : List[Any] = {'''perplexity''': perplexity}
__snake_case : str = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
if trainer.is_world_master():
with open(_snake_case , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , _snake_case , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
results.update(_snake_case )
return results
def lowercase ( _snake_case : Optional[int] ) ->Tuple:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 102 | 1 |
import mpmath # for roots of unity
import numpy as np
class __A :
def __init__( self : Optional[int] , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : str=None ):
# Input as list
lowerCAmelCase : str = list(poly_a or [0] )[:]
lowerCAmelCase : Dict = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
lowerCAmelCase : Union[str, Any] = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
lowerCAmelCase : Optional[Any] = len(self.polyB )
# Add 0 to make lengths equal a power of 2
lowerCAmelCase : Tuple = int(
            2 ** np.ceil(np.log2(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
lowerCAmelCase : List[str] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
lowerCAmelCase : Optional[Any] = self.__multiply()
def lowercase__ ( self : str , UpperCAmelCase_ : Tuple ):
lowerCAmelCase : Dict = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
# Corner case
if len(UpperCAmelCase_ ) <= 1:
return dft[0]
#
lowerCAmelCase : Dict = self.c_max_length // 2
while next_ncol > 0:
lowerCAmelCase : Tuple = [[] for i in range(UpperCAmelCase_ )]
lowerCAmelCase : List[str] = self.root**next_ncol
# First half of next step
lowerCAmelCase : Any = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase_ ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
lowerCAmelCase : str = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase_ ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
lowerCAmelCase : List[str] = new_dft
lowerCAmelCase : int = next_ncol // 2
return dft[0]
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Optional[Any] = self.__dft('A' )
lowerCAmelCase : Any = self.__dft('B' )
lowerCAmelCase : str = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
lowerCAmelCase : Optional[Any] = 2
while next_ncol <= self.c_max_length:
lowerCAmelCase : Optional[Any] = [[] for i in range(UpperCAmelCase_ )]
lowerCAmelCase : int = self.root ** (next_ncol // 2)
lowerCAmelCase : str = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
lowerCAmelCase : List[str] = new_inverse_c
next_ncol *= 2
# Unpack
lowerCAmelCase : int = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Optional[int] ):
        lowerCAmelCase : str = 'A = ' + ' + '.join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A] ) )
        lowerCAmelCase : List[str] = 'B = ' + ' + '.join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B] ) )
        lowerCAmelCase : List[str] = 'A*B = ' + ' + '.join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product ) )
return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
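# NOTE: hedged cross-check sketch, not part of the original file: the same
# technique using numpy's own FFT (imported as np above) — multiply the DFTs
# of the coefficient lists pointwise, then invert.
def fft_poly_multiply(poly_a, poly_b):
    # Pad to a power of two at least as large as the product's length.
    n = 1 << (len(poly_a) + len(poly_b) - 2).bit_length()
    product = np.fft.ifft(np.fft.fft(poly_a, n) * np.fft.fft(poly_b, n)).real
    return [round(c, 8) for c in product[: len(poly_a) + len(poly_b) - 1]]

assert fft_poly_multiply([1, 2], [3, 4]) == [3.0, 10.0, 8.0]  # (1+2x)(3+4x)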
| 357 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __A ( unittest.TestCase ):
def __init__( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int]=7 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : int=18 , UpperCAmelCase_ : List[str]=30 , UpperCAmelCase_ : str=400 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Union[str, Any]=None , ):
lowerCAmelCase : Any = size if size is not None else {'shortest_edge': 20}
lowerCAmelCase : str = crop_size if crop_size is not None else {'height': 18, 'width': 18}
lowerCAmelCase : List[Any] = parent
lowerCAmelCase : Optional[Any] = batch_size
lowerCAmelCase : int = num_channels
lowerCAmelCase : int = image_size
lowerCAmelCase : Tuple = min_resolution
lowerCAmelCase : Any = max_resolution
lowerCAmelCase : int = do_resize
lowerCAmelCase : Dict = size
lowerCAmelCase : int = do_center_crop
lowerCAmelCase : str = crop_size
def lowercase__ ( self : Optional[int] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __A ( lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : Optional[Any] = MobileNetVaImageProcessor if is_vision_available() else None
def lowercase__ ( self : int ):
lowerCAmelCase : List[str] = MobileNetVaImageProcessingTester(self )
@property
def lowercase__ ( self : int ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ):
lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'size' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'do_center_crop' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'crop_size' ) )
def lowercase__ ( self : int ):
lowerCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
lowerCAmelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def lowercase__ ( self : str ):
pass
def lowercase__ ( self : List[str] ):
# Initialize image_processing
lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input
lowerCAmelCase : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : Dict = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowercase__ ( self : Optional[Any] ):
# Initialize image_processing
lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
# Test not batched input
lowerCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : Optional[int] = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowercase__ ( self : Dict ):
# Initialize image_processing
lowerCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
# Test not batched input
lowerCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : List[str] = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
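# NOTE: hedged sketch (function name made up, assumes the PIL import above)
# of the geometry these tests exercise: resize so the shortest edge hits a
# target size, then take a fixed-size center crop.
def resize_shortest_edge_then_crop(image, shortest_edge=20, crop=(18, 18)):
    w, h = image.size
    scale = shortest_edge / min(w, h)
    image = image.resize((round(w * scale), round(h * scale)))
    w, h = image.size
    left, top = (w - crop[0]) // 2, (h - crop[1]) // 2
    return image.crop((left, top, left + crop[0], top + crop[1]))

assert resize_shortest_edge_then_crop(Image.new('RGB', (40, 30))).size == (18, 18)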
| 323 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class A__ ( UpperCAmelCase__ ):
@staticmethod
@abstractmethod
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :ArgumentParser ) -> List[Any]:
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def __UpperCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
raise NotImplementedError()
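# NOTE: self-contained sketch of the pattern the ABC above encodes; every name
# here (BaseCommand, EchoCommand, --text) is illustrative, not a transformers
# API. Each command registers its own subparser and exposes a run() entry point.
from abc import ABC, abstractmethod
from argparse import ArgumentParser

class BaseCommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(subparsers):
        ...

    @abstractmethod
    def run(self):
        ...

class EchoCommand(BaseCommand):
    def __init__(self, text):
        self.text = text

    @staticmethod
    def register_subcommand(subparsers):
        sub = subparsers.add_parser('echo')
        sub.add_argument('--text', default='hi')
        sub.set_defaults(factory=lambda args: EchoCommand(args.text))

    def run(self):
        print(self.text)

parser = ArgumentParser()
EchoCommand.register_subcommand(parser.add_subparsers())
cli_args = parser.parse_args(['echo', '--text', 'hello'])
cli_args.factory(cli_args).run()  # prints: hello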
| 276 |
'''simple docstring'''
A__: Tuple = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
A__: Tuple = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
A__: Any = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 276 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase : Dict = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[Any] = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Dict = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[int] = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
UpperCAmelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
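# NOTE: minimal, hedged sketch of the lazy-import idea behind _LazyModule,
# independent of transformers internals (TinyLazyModule is made up): attribute
# access triggers the submodule import on first use.
import importlib

class TinyLazyModule:
    def __init__(self, package, import_structure):
        self._package = package
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module = importlib.import_module('.' + self._attr_to_module[attr], self._package)
        return getattr(module, attr)

lazy_os = TinyLazyModule('os', {'path': ['join']})
assert lazy_os.join('a', 'b')  # resolves os.path.join on first access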
| 365 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __lowerCAmelCase ( UpperCamelCase__):
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : Optional[int] =self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , "tf_padding" ) )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , "depth_multiplier" ) )
class __lowerCAmelCase :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=1_3 , lowerCAmelCase__=3 , lowerCAmelCase__=3_2 , lowerCAmelCase__=0.25 , lowerCAmelCase__=8 , lowerCAmelCase__=True , lowerCAmelCase__=1_0_2_4 , lowerCAmelCase__=3_2 , lowerCAmelCase__="relu6" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.02 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=1_0 , lowerCAmelCase__=None , ) -> Dict:
'''simple docstring'''
a__ : int =parent
a__ : Optional[Any] =batch_size
a__ : Tuple =num_channels
a__ : Dict =image_size
a__ : Union[str, Any] =depth_multiplier
a__ : List[str] =min_depth
a__ : Dict =tf_padding
a__ : Any =int(last_hidden_size * depth_multiplier )
a__ : Tuple =output_stride
a__ : Optional[Any] =hidden_act
a__ : str =classifier_dropout_prob
a__ : int =use_labels
a__ : List[Any] =is_training
a__ : List[str] =num_labels
a__ : Dict =initializer_range
a__ : Tuple =scope
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
a__ : Union[str, Any] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a__ : Any =None
a__ : List[Any] =None
if self.use_labels:
a__ : Dict =ids_tensor([self.batch_size] , self.num_labels )
a__ : Tuple =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
a__ : Optional[Any] =self.get_config()
return config, pixel_values, labels, pixel_labels
def _lowercase ( self ) -> str:
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
a__ : Union[str, Any] =MobileNetVaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a__ : str =model(lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
'''simple docstring'''
a__ : Any =self.num_labels
a__ : Dict =MobileNetVaForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a__ : Optional[int] =model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self ) -> str:
'''simple docstring'''
a__ : str =self.prepare_config_and_inputs()
a__ , a__ , a__ , a__ : List[str] =config_and_inputs
a__ : Tuple ={"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase):
_lowercase : List[str] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
_lowercase : Optional[int] = (
{"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
_lowercase : Union[str, Any] = False
_lowercase : Optional[int] = False
_lowercase : List[str] = False
_lowercase : str = False
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : Optional[Any] =MobileNetVaModelTester(self )
a__ : Optional[Any] =MobileNetVaConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV1 does not use inputs_embeds" )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason="MobileNetV1 does not support input and output embeddings" )
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="MobileNetV1 does not output attentions" )
def _lowercase ( self ) -> Any:
'''simple docstring'''
pass
def _lowercase ( self ) -> str:
'''simple docstring'''
a__ , a__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : Optional[int] =model_class(lowerCAmelCase__ )
a__ : str =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ : Optional[int] =[*signature.parameters.keys()]
a__ : Union[str, Any] =["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
a__ : int =model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
a__ : List[Any] =model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
a__ : Optional[int] =outputs.hidden_states
a__ : Optional[int] =2_6
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
a__ , a__ : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : Optional[int] =True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a__ : Union[str, Any] =True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def _lowercase ( self ) -> Any:
'''simple docstring'''
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : List[str] =MobileNetVaModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def _A ( ):
"""simple docstring"""
a__ : Optional[Any] =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase):
@cached_property
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224" ) if is_vision_available() else None
)
@slow
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : str =MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224" ).to(lowerCAmelCase__ )
a__ : Optional[Any] =self.default_image_processor
a__ : Optional[int] =prepare_img()
a__ : Optional[int] =image_processor(images=lowerCAmelCase__ , return_tensors="pt" ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
a__ : int =model(**lowerCAmelCase__ )
# verify the logits
a__ : str =torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
a__ : int =torch.tensor([-4.17_39, -1.12_33, 3.12_05] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
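# NOTE: hedged sketch of the arithmetic the tester above encodes: the hidden
# size is scaled by the depth multiplier, and a stride-s backbone maps an HxH
# image to H//s x H//s feature maps (defaults mirror the tester's values).
def expected_feature_shape(batch=13, last_hidden=1024, depth_multiplier=0.25, image_size=32, output_stride=32):
    hidden = int(last_hidden * depth_multiplier)
    return (batch, hidden, image_size // output_stride, image_size // output_stride)

assert expected_feature_shape() == (13, 256, 1, 1)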
| 148 | 0 |
import os
from datetime import datetime as dt
from github import Github
UpperCAmelCase : str = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def _A ( ):
"""simple docstring"""
a__ : Tuple =Github(os.environ["GITHUB_TOKEN"] )
a__ : int =g.get_repo("huggingface/diffusers" )
a__ : int =repo.get_issues(state="open" )
for issue in open_issues:
a__ : int =sorted(issue.get_comments() , key=lambda SCREAMING_SNAKE_CASE : i.created_at , reverse=SCREAMING_SNAKE_CASE )
a__ : Tuple =comments[0] if len(SCREAMING_SNAKE_CASE ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
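# NOTE: the time/label part of the triage rules above, factored into a pure
# helper for clarity; the bot-authored-last-comment check is omitted, and the
# datetimes/labels are plain Python values rather than PyGithub objects.
from datetime import datetime, timedelta

def is_stale_candidate(now, updated_at, created_at, labels, exempt=('wip',)):
    return (
        (now - updated_at) > timedelta(days=7)
        and (now - created_at) >= timedelta(days=30)
        and not any(label.lower() in exempt for label in labels)
    )

_now = datetime(2023, 6, 1)
assert is_stale_candidate(_now, _now - timedelta(days=8), _now - timedelta(days=40), ['bug'])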
| 95 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __lowerCAmelCase ( unittest.TestCase):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=3 , lowerCAmelCase__=1_8 , lowerCAmelCase__=3_0 , lowerCAmelCase__=4_0_0 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , ) -> Optional[Any]:
'''simple docstring'''
a__ : Union[str, Any] =size if size is not None else {"shortest_edge": 2_0}
a__ : List[str] =crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
a__ : Tuple =parent
a__ : Union[str, Any] =batch_size
a__ : List[str] =num_channels
a__ : List[Any] =image_size
a__ : str =min_resolution
a__ : Optional[int] =max_resolution
a__ : Tuple =do_resize
a__ : Union[str, Any] =size
a__ : List[Any] =do_center_crop
a__ : List[str] =crop_size
a__ : Optional[int] =do_flip_channel_order
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class __lowerCAmelCase ( UpperCamelCase__ , unittest.TestCase):
_lowercase : int = MobileViTImageProcessor if is_vision_available() else None
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
a__ : Tuple =MobileViTImageProcessingTester(self )
@property
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
a__ : str =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_center_crop" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "center_crop" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_flip_channel_order" ) )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : List[Any] =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 2_0} )
self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} )
a__ : Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ : List[str] =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a__ : Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
a__ : Tuple =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a__ : List[Any] =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a__ : List[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
a__ : Tuple =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a__ : int =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
a__ : int =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a__ : Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
a__ : List[str] =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a__ : List[str] =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
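# NOTE: small sketch (using the numpy import above) of the option that
# distinguishes this processor from the MobileNetV1 tests earlier:
# do_flip_channel_order swaps RGB to BGR along the channel axis of (C, H, W).
def flip_channel_order(image):
    return image[::-1, :, :]

rgb = np.arange(12).reshape(3, 2, 2)
bgr = flip_channel_order(rgb)
assert (bgr[0] == rgb[2]).all() and (bgr[2] == rgb[0]).all()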
| 95 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
a_ : List[Any] = logging.get_logger(__name__)
def _A (lowerCAmelCase__ :Union[tf.Tensor, np.ndarray] ) -> List[int]:
'''simple docstring'''
if isinstance(UpperCAmelCase__ , np.ndarray ):
return list(tensor.shape )
_a = tf.shape(UpperCAmelCase__ )
if tensor.shape == tf.TensorShape(UpperCAmelCase__ ):
return dynamic
_a = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(UpperCAmelCase__ )]
def _A (lowerCAmelCase__ :tf.Tensor , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :Optional[str] = None ) -> tf.Tensor:
'''simple docstring'''
return tf.nn.softmax(logits=logits + 1E-9 , axis=UpperCAmelCase__ , name=UpperCAmelCase__ )
def _A (lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Tuple=1E-5 , lowerCAmelCase__ :List[str]=-1 ) -> List[str]:
'''simple docstring'''
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.' )
# Get mean and variance on the axis to be normalized
_a = tf.nn.moments(UpperCAmelCase__ , axes=[axis] , keepdims=UpperCAmelCase__ )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
_a = [1] * inputs.shape.rank
_a = shape_list(UpperCAmelCase__ )[axis]
_a = tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ )
_a = tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ )
# Compute layer normalization using the batch_normalization
# function.
_a = tf.nn.batch_normalization(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , offset=UpperCAmelCase__ , scale=UpperCAmelCase__ , variance_epsilon=UpperCAmelCase__ , )
return outputs
def _A (lowerCAmelCase__ :Dict , lowerCAmelCase__ :Tuple=0 , lowerCAmelCase__ :Any=-1 ) -> Dict:
'''simple docstring'''
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
_a = tf.shape(UpperCAmelCase__ )
_a = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
_a = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ )
def _A (lowerCAmelCase__ :tf.Tensor ) -> tf.Tensor:
'''simple docstring'''
if not isinstance(UpperCAmelCase__ , tf.Tensor ):
_a = tf.convert_to_tensor(UpperCAmelCase__ ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
_a = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
_a = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
_a = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def _A (lowerCAmelCase__ :tf.Tensor , lowerCAmelCase__ :int , lowerCAmelCase__ :str = "input_ids" ) -> None:
'''simple docstring'''
tf.debugging.assert_less(
UpperCAmelCase__ , tf.cast(UpperCAmelCase__ , dtype=tensor.dtype ) , message=(
f'The maximum value of {tensor_name} ({tf.math.reduce_max(UpperCAmelCase__ )}) must be smaller than the embedding '
f'layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'
) , )
def _A (lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :List[str] ) -> Any:
'''simple docstring'''
_a = 6_45_12
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
_a = [x for x in data if len(UpperCAmelCase__ ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'The following attributes cannot be saved to HDF5 file because '
f'they are larger than {HDF5_OBJECT_HEADER_LIMIT} '
f'bytes: {bad_attributes}' )
_a = np.asarray(UpperCAmelCase__ )
_a = 1
_a = np.array_split(UpperCAmelCase__ , UpperCAmelCase__ )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
_a = np.array_split(UpperCAmelCase__ , UpperCAmelCase__ )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(UpperCAmelCase__ ):
_a = chunk_data
else:
_a = data
def _A (lowerCAmelCase__ :str , lowerCAmelCase__ :Union[str, Any] ) -> str:
'''simple docstring'''
if name in group.attrs:
_a = [n.decode('utf8' ) if hasattr(UpperCAmelCase__ , 'decode' ) else n for n in group.attrs[name]]
else:
_a = []
_a = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('utf8' ) if hasattr(UpperCAmelCase__ , 'decode' ) else n for n in group.attrs['%s%d' % (name, chunk_id)]] )
chunk_id += 1
return data
def _A (lowerCAmelCase__ :Optional[Any] ) -> Any:
'''simple docstring'''
def _expand_single_ad_tensor(lowerCAmelCase__ :Optional[Any] ):
if isinstance(UpperCAmelCase__ , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(UpperCAmelCase__ , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , UpperCAmelCase__ )
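# NOTE: pure-numpy sketch of the attribute-chunking loop above: keep splitting
# the array until every chunk fits under the HDF5 object header limit
# (64512 bytes); the function name and demo limit are illustrative.
def split_under_limit(values, limit=64512):
    num_chunks = 1
    chunks = np.array_split(np.asarray(values), num_chunks)
    while any(chunk.nbytes > limit for chunk in chunks):
        num_chunks += 1
        chunks = np.array_split(np.asarray(values), num_chunks)
    return chunks

demo_chunks = split_under_limit(['x' * 1000] * 100, limit=50000)
assert all(chunk.nbytes <= 50000 for chunk in demo_chunks)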
| 371 |
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
a_ : Optional[Any] = TypeVar("T")
def _A (lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
return (position - 1) // 2
def _A (lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
return (2 * position) + 1
def _A (lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
return (2 * position) + 2
class a ( Generic[T] ):
def __init__( self ) -> None:
_a = []
_a = {}
_a = 0
def __len__( self ) -> int:
return self.elements
def __repr__( self ) -> str:
return str(self.heap )
def __UpperCAmelCase ( self ) -> bool:
# Check if the priority queue is empty
return self.elements == 0
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> None:
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
_a = self.elements
self.elements += 1
self._bubble_up(__magic_name__ )
def __UpperCAmelCase ( self ) -> T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
_a , _a = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
_a , _a = self.heap[0]
self._bubble_down(__magic_name__ )
return elem
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> None:
# Update the weight of the given key
_a = self.position_map[elem]
_a = (elem, weight)
if position > 0:
_a = get_parent_position(__magic_name__ )
_a , _a = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(__magic_name__ )
else:
self._bubble_down(__magic_name__ )
else:
self._bubble_down(__magic_name__ )
def __UpperCAmelCase ( self , __magic_name__ ) -> None:
# Place a node at the proper position (upward movement) [to be used internally
# only]
_a = self.position_map[elem]
if curr_pos == 0:
return None
_a = get_parent_position(__magic_name__ )
_a , _a = self.heap[curr_pos]
_a , _a = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(__magic_name__ , __magic_name__ )
return self._bubble_up(__magic_name__ )
return None
def __UpperCAmelCase ( self , __magic_name__ ) -> None:
# Place a node at the proper position (downward movement) [to be used
# internally only]
_a = self.position_map[elem]
_a , _a = self.heap[curr_pos]
_a = get_child_left_position(__magic_name__ )
_a = get_child_right_position(__magic_name__ )
if child_left_position < self.elements and child_right_position < self.elements:
_a , _a = self.heap[child_left_position]
_a , _a = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(__magic_name__ , __magic_name__ )
return self._bubble_down(__magic_name__ )
if child_left_position < self.elements:
_a , _a = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(__magic_name__ , __magic_name__ )
return self._bubble_down(__magic_name__ )
else:
return None
if child_right_position < self.elements:
_a , _a = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(__magic_name__ , __magic_name__ )
return self._bubble_down(__magic_name__ )
return None
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> None:
# Swap the nodes at the given positions
_a = self.heap[nodea_pos][0]
_a = self.heap[nodea_pos][0]
_a , _a = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
_a = nodea_pos
_a = nodea_pos
class a ( Generic[T] ):
def __init__( self ) -> None:
_a = {}
_a = 0
def __repr__( self ) -> str:
return str(self.connections )
def __len__( self ) -> int:
return self.nodes
def __UpperCAmelCase ( self , __magic_name__ ) -> None:
# Add a node in the graph if it is not in the graph
if node not in self.connections:
_a = {}
self.nodes += 1
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(__magic_name__ )
self.add_node(__magic_name__ )
_a = weight
_a = weight
def _A (lowerCAmelCase__ :GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
'''simple docstring'''
_a = {node: maxsize for node in graph.connections}
_a = {node: None for node in graph.connections}
_a = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(lowerCAmelCase__ , lowerCAmelCase__ )
if priority_queue.is_empty():
return dist, parent
# initialization
_a = priority_queue.extract_min()
_a = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_a = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCAmelCase__ , dist[neighbour] )
_a = node
# running prim's algorithm
while not priority_queue.is_empty():
_a = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_a = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCAmelCase__ , dist[neighbour] )
_a = node
return dist, parent
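# NOTE: compact heapq-based Prim's for comparison with the indexed-heap
# version above; it returns MST parent pointers for a dict-of-dicts graph and
# is a hedged sketch, not a drop-in replacement for the classes in this file.
import heapq

def prim(graph, start):
    parent = {}
    heap = [(0, start, None)]
    seen = set()
    while heap:
        weight, node, par = heapq.heappop(heap)
        if node in seen:
            continue
        seen.add(node)
        parent[node] = par
        for nbr, w in graph[node].items():
            if nbr not in seen:
                heapq.heappush(heap, (w, nbr, node))
    return parent

g = {'a': {'b': 1, 'c': 4}, 'b': {'a': 1, 'c': 2}, 'c': {'a': 4, 'b': 2}}
assert prim(g, 'a') == {'a': None, 'b': 'a', 'c': 'b'}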
| 104 | 0 |
import argparse
import struct
import unittest
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : bytes ) -> None:
a_ : Tuple = data
# Initialize hash values
a_ : Tuple = [
0X6a09_e667,
0Xbb67_ae85,
0X3c6e_f372,
0Xa54f_f53a,
0X510e_527f,
0X9b05_688c,
0X1f83_d9ab,
0X5be0_cd19,
]
# Initialize round constants
a_ : Tuple = [
0X428a_2f98,
0X7137_4491,
0Xb5c0_fbcf,
0Xe9b5_dba5,
0X3956_c25b,
0X59f1_11f1,
0X923f_82a4,
0Xab1c_5ed5,
0Xd807_aa98,
0X1283_5b01,
0X2431_85be,
0X550c_7dc3,
0X72be_5d74,
0X80de_b1fe,
0X9bdc_06a7,
0Xc19b_f174,
0Xe49b_69c1,
0Xefbe_4786,
0X0fc1_9dc6,
0X240c_a1cc,
0X2de9_2c6f,
0X4a74_84aa,
0X5cb0_a9dc,
0X76f9_88da,
0X983e_5152,
0Xa831_c66d,
0Xb003_27c8,
0Xbf59_7fc7,
0Xc6e0_0bf3,
0Xd5a7_9147,
0X06ca_6351,
0X1429_2967,
0X27b7_0a85,
0X2e1b_2138,
0X4d2c_6dfc,
0X5338_0d13,
0X650a_7354,
0X766a_0abb,
0X81c2_c92e,
0X9272_2c85,
0Xa2bf_e8a1,
0Xa81a_664b,
0Xc24b_8b70,
0Xc76c_51a3,
0Xd192_e819,
0Xd699_0624,
0Xf40e_3585,
0X106a_a070,
0X19a4_c116,
0X1e37_6c08,
0X2748_774c,
0X34b0_bcb5,
0X391c_0cb3,
0X4ed8_aa4a,
0X5b9c_ca4f,
0X682e_6ff3,
0X748f_82ee,
0X78a5_636f,
0X84c8_7814,
0X8cc7_0208,
0X90be_fffa,
0Xa450_6ceb,
0Xbef9_a3f7,
0Xc671_78f2,
]
a_ : int = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ : bytes ) -> bytes:
a_ : Any = b'\x80' + (b'\x00' * (6_3 - (len(SCREAMING_SNAKE_CASE__ ) + 8) % 6_4))
a_ : List[str] = struct.pack('>Q' , (len(SCREAMING_SNAKE_CASE__ ) * 8) )
return data + padding + big_endian_integer
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> None:
# Convert into blocks of 64 bytes
a_ : int = [
self.preprocessed_data[x : x + 6_4]
for x in range(0 , len(self.preprocessed_data ) , 6_4 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
a_ : List[str] = list(struct.unpack('>16L' , SCREAMING_SNAKE_CASE__ ) )
# add 48 0-ed integers
words += [0] * 4_8
a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ : Tuple = self.hashes
for index in range(0 , 6_4 ):
if index > 1_5:
# modify the zero-ed indexes at the end of the array
a_ : Optional[int] = (
self.ror(words[index - 1_5] , 7 )
^ self.ror(words[index - 1_5] , 1_8 )
^ (words[index - 1_5] >> 3)
)
a_ : List[str] = (
self.ror(words[index - 2] , 1_7 )
^ self.ror(words[index - 2] , 1_9 )
^ (words[index - 2] >> 1_0)
)
a_ : Union[str, Any] = (
words[index - 1_6] + sa + words[index - 7] + sa
) % 0X1_0000_0000
# Compression
a_ : Any = self.ror(SCREAMING_SNAKE_CASE__ , 6 ) ^ self.ror(SCREAMING_SNAKE_CASE__ , 1_1 ) ^ self.ror(SCREAMING_SNAKE_CASE__ , 2_5 )
a_ : List[str] = (e & f) ^ ((~e & 0Xffff_ffff) & g)
a_ : Tuple = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X1_0000_0000
a_ : int = self.ror(SCREAMING_SNAKE_CASE__ , 2 ) ^ self.ror(SCREAMING_SNAKE_CASE__ , 1_3 ) ^ self.ror(SCREAMING_SNAKE_CASE__ , 2_2 )
a_ : Optional[Any] = (a & b) ^ (a & c) ^ (b & c)
a_ : Tuple = (sa + maj) % 0X1_0000_0000
a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ : List[str] = (
g,
f,
e,
((d + tempa) % 0X1_0000_0000),
c,
b,
a,
((tempa + tempa) % 0X1_0000_0000),
)
a_ : Tuple = [a, b, c, d, e, f, g, h]
# Modify final values
a_ : Dict = [
((element + mutated_hash_values[index]) % 0X1_0000_0000)
for index, element in enumerate(self.hashes )
]
a_ : Any = ''.join([hex(SCREAMING_SNAKE_CASE__ )[2:].zfill(8 ) for value in self.hashes] )
def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> int:
return 0Xffff_ffff & (value << (3_2 - rotations)) | (value >> rotations)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> None:
import hashlib
a_ : Union[str, Any] = bytes('Test String' , 'utf-8' )
        self.assertEqual(SHAaaa(SCREAMING_SNAKE_CASE__ ).hash , hashlib.sha256(SCREAMING_SNAKE_CASE__ ).hexdigest() )
def SCREAMING_SNAKE_CASE_ ( ) -> None:
"""simple docstring"""
import doctest
doctest.testmod()
a_ : Dict = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
a_ : Optional[int] = parser.parse_args()
a_ : List[Any] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
a_ : Optional[Any] = f.read()
else:
a_ : Optional[Any] = bytes(__A , 'utf-8' )
print(SHAaaa(__A ).hash )
if __name__ == "__main__":
main()
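# NOTE: quick sanity sketch: right-rotation (the primitive used throughout the
# compression loop above, written standalone here) and a hashlib cross-check
# against the standard SHA-256 test vector for "abc".
import hashlib

def rotr(value, rotations, bits=32):
    mask = (1 << bits) - 1
    return mask & ((value << (bits - rotations)) | (value >> rotations))

assert rotr(0x00000001, 1) == 0x80000000
assert hashlib.sha256(b'abc').hexdigest().startswith('ba7816bf')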
| 32 |
from jiwer import compute_measures
import datasets
__snake_case : Dict ='\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
__snake_case : Optional[Any] ='\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
__snake_case : Any ='\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCamelCase__ ( datasets.Metric):
'''simple docstring'''
def lowerCAmelCase__ (self ) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ,id='''sequence''' ),
'''references''': datasets.Value('''string''' ,id='''sequence''' ),
} ) ,codebase_urls=['''https://github.com/jitsi/jiwer/'''] ,reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] ,)
def lowerCAmelCase__ (self ,__lowerCamelCase=None ,__lowerCamelCase=None ,__lowerCamelCase=False ) -> Any:
"""simple docstring"""
if concatenate_texts:
return compute_measures(__lowerCamelCase ,__lowerCamelCase )["wer"]
else:
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : Tuple = 0
for prediction, reference in zip(__lowerCamelCase ,__lowerCamelCase ):
lowerCAmelCase__ : Dict = compute_measures(__lowerCamelCase ,__lowerCamelCase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
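# NOTE: hand-rolled word-level edit distance giving the WER formula from the
# docstring above, (S + D + I) / N, without the jiwer dependency; a hedged
# sketch, not the metric's actual implementation.
def word_error_rate(reference, hypothesis):
    ref, hyp = reference.split(), hypothesis.split()
    dist = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dist[i][0] = i  # deleting i reference words
    for j in range(len(hyp) + 1):
        dist[0][j] = j  # inserting j hypothesis words
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            substitution = dist[i - 1][j - 1] + (ref[i - 1] != hyp[j - 1])
            dist[i][j] = min(dist[i - 1][j] + 1, dist[i][j - 1] + 1, substitution)
    return dist[-1][-1] / len(ref)

assert word_error_rate('this is the reference', 'this is the prediction') == 0.25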
| 129 | 0 |
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
__UpperCAmelCase = """\
@inproceedings{lin-2004-rouge,
title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",
author = \"Lin, Chin-Yew\",
booktitle = \"Text Summarization Branches Out\",
month = jul,
year = \"2004\",
address = \"Barcelona, Spain\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W04-1013\",
pages = \"74--81\",
}
"""
__UpperCAmelCase = """\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
"""
__UpperCAmelCase = """
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,
`\"rougeL\"`: Longest common subsequence based scoring.
`\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric('rouge')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
>>> print(results[\"rouge1\"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results[\"rouge1\"].mid.fmeasure)
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/ROUGE_(metric)''',
'''https://github.com/google-research/google-research/tree/master/rouge''',
] , )
def SCREAMING_SNAKE_CASE__ ( self : Dict , a : Optional[int] , a : int , a : int=None , a : Optional[Any]=True , a : Optional[Any]=False ):
"""simple docstring"""
if rouge_types is None:
__lowerCamelCase = ['''rouge1''', '''rouge2''', '''rougeL''', '''rougeLsum''']
__lowerCamelCase = rouge_scorer.RougeScorer(rouge_types=a , use_stemmer=a )
if use_aggregator:
__lowerCamelCase = scoring.BootstrapAggregator()
else:
__lowerCamelCase = []
for ref, pred in zip(a , a ):
__lowerCamelCase = scorer.score(a , a )
if use_aggregator:
aggregator.add_scores(a )
else:
scores.append(a )
if use_aggregator:
__lowerCamelCase = aggregator.aggregate()
else:
__lowerCamelCase = {}
for key in scores[0]:
__lowerCamelCase = [score[key] for score in scores]
return result
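# NOTE: minimal ROUGE-1 by hand (unigram overlap precision/recall/F1),
# mirroring the docstring example where identical strings score 1.0; this is
# a hedged sketch, not the rouge_score package's stemmed implementation.
from collections import Counter

def rouge1(prediction, reference):
    pred, ref = Counter(prediction.split()), Counter(reference.split())
    overlap = sum((pred & ref).values())
    precision = overlap / max(sum(pred.values()), 1)
    recall = overlap / max(sum(ref.values()), 1)
    f1 = 2 * precision * recall / max(precision + recall, 1e-9)
    return precision, recall, f1

assert rouge1('hello there', 'hello there') == (1.0, 1.0, 1.0)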
| 353 | '''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : Dict =(DPMSolverSDEScheduler,)
lowerCamelCase : List[str] =1_0
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , **a : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = {
'''num_train_timesteps''': 11_00,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**a )
return config
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=a )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=a , beta_end=a )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=a )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
__lowerCamelCase = self.scheduler_classes[0]
__lowerCamelCase = self.get_scheduler_config()
__lowerCamelCase = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps )
__lowerCamelCase = self.dummy_model()
__lowerCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowerCamelCase = sample.to(a )
for i, t in enumerate(scheduler.timesteps ):
__lowerCamelCase = scheduler.scale_model_input(a , a )
__lowerCamelCase = model(a , a )
__lowerCamelCase = scheduler.step(a , a , a )
__lowerCamelCase = output.prev_sample
__lowerCamelCase = torch.sum(torch.abs(a ) )
__lowerCamelCase = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.47_82_10_44_92_18_75 ) < 1e-2
assert abs(result_mean.item() - 0.21_78_70_59_64_56_52_77 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_21_11_81_64_06 ) < 1e-2
assert abs(result_mean.item() - 0.2_23_42_90_68_92_29_96_52 ) < 1e-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1e-2
assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = self.scheduler_classes[0]
__lowerCamelCase = self.get_scheduler_config(prediction_type='''v_prediction''' )
__lowerCamelCase = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps )
__lowerCamelCase = self.dummy_model()
__lowerCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowerCamelCase = sample.to(a )
for i, t in enumerate(scheduler.timesteps ):
__lowerCamelCase = scheduler.scale_model_input(a , a )
__lowerCamelCase = model(a , a )
__lowerCamelCase = scheduler.step(a , a , a )
__lowerCamelCase = output.prev_sample
__lowerCamelCase = torch.sum(torch.abs(a ) )
__lowerCamelCase = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_24.77_14_92_00_43_94_53 ) < 1e-2
assert abs(result_mean.item() - 0.1_62_26_28_90_14_81_62_84 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_28.1_66_33_60_59_57_03 ) < 1e-2
assert abs(result_mean.item() - 0.1_66_88_32_60_01_16_72_97 ) < 1e-3
else:
assert abs(result_sum.item() - 1_19.8_48_75_48_82_81_25 ) < 1e-2
assert abs(result_mean.item() - 0.15_60_53_06_62_53_66_21 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
__lowerCamelCase = self.scheduler_classes[0]
__lowerCamelCase = self.get_scheduler_config()
__lowerCamelCase = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps , device=a )
__lowerCamelCase = self.dummy_model()
__lowerCamelCase = self.dummy_sample_deter.to(a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
__lowerCamelCase = scheduler.scale_model_input(a , a )
__lowerCamelCase = model(a , a )
__lowerCamelCase = scheduler.step(a , a , a )
__lowerCamelCase = output.prev_sample
__lowerCamelCase = torch.sum(torch.abs(a ) )
__lowerCamelCase = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.46_95_73_97_46_09_38 ) < 1e-2
assert abs(result_mean.item() - 0.2_18_05_93_46_07_98_26_35 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_36_37_69_53_12 ) < 1e-2
assert abs(result_mean.item() - 0.2_23_42_90_83_82_41_57_71 ) < 1e-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1e-2
assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = self.scheduler_classes[0]
__lowerCamelCase = self.get_scheduler_config()
__lowerCamelCase = scheduler_class(**a , use_karras_sigmas=a )
scheduler.set_timesteps(self.num_inference_steps , device=a )
__lowerCamelCase = self.dummy_model()
__lowerCamelCase = self.dummy_sample_deter.to(a ) * scheduler.init_noise_sigma
__lowerCamelCase = sample.to(a )
for t in scheduler.timesteps:
__lowerCamelCase = scheduler.scale_model_input(a , a )
__lowerCamelCase = model(a , a )
__lowerCamelCase = scheduler.step(a , a , a )
__lowerCamelCase = output.prev_sample
__lowerCamelCase = torch.sum(torch.abs(a ) )
__lowerCamelCase = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_76.66_97_41_35_74_21_88 ) < 1e-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_77.63_65_35_64_45_31_25 ) < 1e-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1e-2
else:
assert abs(result_sum.item() - 1_70.3_13_52_23_38_86_72 ) < 1e-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1e-2
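The tests above all exercise the same sampling loop. Below is a minimal sketch of that loop, using only the scheduler calls shown in the tests (`set_timesteps`, `init_noise_sigma`, `scale_model_input`, `step(...).prev_sample`); the zero-noise stand-in model is purely structural, not a real denoiser, and `torchsde` must be installed:

import torch
from diffusers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, beta_schedule="linear")
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for a real UNet call
    sample = scheduler.step(noise_pred, t, sample).prev_sample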
| 237 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : str = logging.get_logger(__name__)
class snake_case ( lowercase ):
"""simple docstring"""
_lowerCamelCase = "encoder-decoder"
_lowerCamelCase = True
def __init__( self , **UpperCamelCase ):
"""simple docstring"""
super().__init__(**UpperCamelCase )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
lowerCamelCase_ = kwargs.pop("encoder" )
lowerCamelCase_ = encoder_config.pop("model_type" )
lowerCamelCase_ = kwargs.pop("decoder" )
lowerCamelCase_ = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
lowerCamelCase_ = AutoConfig.for_model(UpperCamelCase , **UpperCamelCase )
lowerCamelCase_ = AutoConfig.for_model(UpperCamelCase , **UpperCamelCase )
lowerCamelCase_ = True
@classmethod
def snake_case ( cls , encoder_config , decoder_config , **kwargs ):
"""simple docstring"""
logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" )
decoder_config.is_decoder = True
decoder_config.add_cross_attention = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
def snake_case ( self ):
"""simple docstring"""
output = copy.deepcopy(self.__dict__ )
output["encoder"] = self.encoder.to_dict()
output["decoder"] = self.decoder.to_dict()
output["model_type"] = self.__class__.model_type
return output
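A minimal sketch of how this config is typically composed via the `from_encoder_decoder_configs` classmethod shown above (the BERT configs are chosen only for illustration):

from transformers import BertConfig, EncoderDecoderConfig

encoder_config = BertConfig()
decoder_config = BertConfig()
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
assert config.is_encoder_decoder
assert config.decoder.is_decoder and config.decoder.add_cross_attention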
| 55 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class A_ ( __lowerCamelCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(snake_case , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(snake_case , 'num_attention_heads' ) )
class A_ :
'''simple docstring'''
def __init__( self , snake_case , snake_case=13 , snake_case=64 , snake_case=3 , snake_case=3 , snake_case=2 , snake_case=1 , snake_case=16 , snake_case=[128, 256, 384] , snake_case=[4, 6, 8] , snake_case=[2, 3, 4] , snake_case=[16, 16, 16] , snake_case=0 , snake_case=[2, 2, 2] , snake_case=[2, 2, 2] , snake_case=0.02 , snake_case=True , snake_case=True , snake_case=2 , ):
lowercase = parent
lowercase = batch_size
lowercase = image_size
lowercase = num_channels
lowercase = kernel_size
lowercase = stride
lowercase = padding
lowercase = hidden_sizes
lowercase = num_attention_heads
lowercase = depths
lowercase = key_dim
lowercase = drop_path_rate
lowercase = patch_size
lowercase = attention_ratio
lowercase = mlp_ratio
lowercase = initializer_range
lowercase = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
lowercase = is_training
lowercase = use_labels
lowercase = num_labels
lowercase = initializer_range
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.num_labels )
lowercase = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self ):
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case ):
lowercase = LevitModel(config=snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case )
lowercase = (self.image_size, self.image_size)
lowercase , lowercase = image_size[0], image_size[1]
for _ in range(4 ):
lowercase = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
lowercase = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case ):
lowercase = self.num_labels
lowercase = LevitForImageClassification(snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.prepare_config_and_inputs()
lowercase , lowercase , lowercase = config_and_inputs
lowercase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A_ ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase : Tuple = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
_UpperCamelCase : Dict = (
{
"""feature-extraction""": LevitModel,
"""image-classification""": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
_UpperCamelCase : Dict = False
_UpperCamelCase : List[str] = False
_UpperCamelCase : List[str] = False
_UpperCamelCase : str = False
_UpperCamelCase : List[str] = False
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = LevitModelTester(self )
lowercase = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self ):
return
@unittest.skip(reason='Levit does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
@unittest.skip(reason='Levit does not support input and output embeddings' )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
@unittest.skip(reason='Levit does not output attentions' )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(snake_case )
lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
def check_hidden_states_output(snake_case , snake_case , snake_case ):
lowercase = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(snake_case , snake_case ) )
lowercase = outputs.hidden_states
lowercase = len(self.model_tester.depths ) + 1
self.assertEqual(len(snake_case ) , snake_case )
lowercase = (self.model_tester.image_size, self.model_tester.image_size)
lowercase , lowercase = image_size[0], image_size[1]
for _ in range(4 ):
lowercase = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
lowercase = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = True
check_hidden_states_output(snake_case , snake_case , snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase = True
check_hidden_states_output(snake_case , snake_case , snake_case )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case=False ):
lowercase = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
if not self.model_tester.is_training:
return
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(snake_case )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
lowercase = model_class(snake_case )
model.to(snake_case )
model.train()
lowercase = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
lowercase = model(**snake_case ).loss
loss.backward()
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase = False
lowercase = True
for model_class in self.all_model_classes:
if model_class in get_values(snake_case ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
lowercase = model_class(snake_case )
model.gradient_checkpointing_enable()
model.to(snake_case )
model.train()
lowercase = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
lowercase = model(**snake_case ).loss
loss.backward()
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(snake_case ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type['title']}''' ):
lowercase = problem_type['title']
lowercase = problem_type['num_labels']
lowercase = model_class(snake_case )
model.to(snake_case )
model.train()
lowercase = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
if problem_type["num_labels"] > 1:
lowercase = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
lowercase = inputs['labels'].to(problem_type['dtype'] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=snake_case ) as warning_list:
lowercase = model(**snake_case ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = LevitModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def UpperCAmelCase_ ( ):
lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ):
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
snake_case )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
lowercase = model(**snake_case )
# verify the logits
lowercase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , snake_case )
lowercase = torch.tensor([1.0_448, -0.3_745, -1.8_317] ).to(snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
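The repeated `floor` computation in these tests is the standard convolution output-size formula, out = floor((in + 2*padding - kernel) / stride) + 1. A quick sketch with the tester's defaults (kernel 3, stride 2, padding 1) on a 64-pixel input:

from math import floor

size = 64
for _ in range(4):  # four stride-2 convolutions in the LeViT patch embedding
    size = floor((size + 2 * 1 - 3) / 2) + 1
print(size)  # 64 -> 32 -> 16 -> 8 -> 4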
| 195 | 0 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling( num : int , den : int ) -> bool:
return (
num != den and num % 1_0 == den // 1_0 and (num // 1_0) / (den % 1_0) == num / den
)
def fraction_list( digit_len : int ) -> list[str]:
solutions = []
den = 1_1
last_digit = int('''1''' + '''0''' * digit_len )
for num in range(den , last_digit ):
while den <= 9_9:
if (num != den) and (num % 1_0 == den // 1_0) and (den % 1_0 != 0):
if is_digit_cancelling(num , den ):
solutions.append(F"""{num}/{den}""" )
den += 1
num += 1
den = 1_0
return solutions
def solution( n : int = 2 ) -> int:
result = 1.0
for fraction in fraction_list(n ):
frac = Fraction(fraction )
result *= frac.denominator / frac.numerator
return int(result )
if __name__ == "__main__":
print(solution())
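A concrete instance of the predicate above: 49/98 is the classic curious fraction, since naively cancelling the shared digit 9 leaves 4/8, which happens to equal the original value. A quick sanity check against the function above:

assert 49 / 98 == 4 / 8 == 0.5
assert is_digit_cancelling(49, 98)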
| 361 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[Any] = "beit"
def __init__( self , vocab_size=8192 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , image_size=224 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=255 , **kwargs , ):
super().__init__(**kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.use_mask_token = use_mask_token
self.use_absolute_position_embeddings = use_absolute_position_embeddings
self.use_relative_position_bias = use_relative_position_bias
self.use_shared_relative_position_bias = use_shared_relative_position_bias
self.layer_scale_init_value = layer_scale_init_value
self.drop_path_rate = drop_path_rate
self.use_mean_pooling = use_mean_pooling
# decode head attributes (semantic segmentation)
self.out_indices = out_indices
self.pool_scales = pool_scales
# auxiliary head attributes (semantic segmentation)
self.use_auxiliary_head = use_auxiliary_head
self.auxiliary_loss_weight = auxiliary_loss_weight
self.auxiliary_channels = auxiliary_channels
self.auxiliary_num_convs = auxiliary_num_convs
self.auxiliary_concat_input = auxiliary_concat_input
self.semantic_loss_ignore_index = semantic_loss_ignore_index
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Dict = version.parse("1.11" )
@property
def _snake_case ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _snake_case ( self ):
return 1e-4
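A quick sketch instantiating the configuration defined above; the printed values are simply the defaults from the signature, not new claims:

from transformers import BeitConfig

config = BeitConfig()
print(config.hidden_size, config.num_hidden_layers, config.image_size)  # 768 12 224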
| 2 | 0 |
from __future__ import annotations
from random import choice
def random_pivot( lst ):
return choice(lst )
def kth_number( lst : list[int] , k : int ):
pivot = random_pivot(lst )
# partition based on pivot
# linear time
small = [e for e in lst if e < pivot]
big = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(small ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(small ) < k - 1:
return kth_number(big , k - len(small ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(small , k )
if __name__ == "__main__":
import doctest
doctest.testmod()
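Usage sketch for the quickselect above (example values are illustrative); note that `k` is 1-indexed, so `k=3` requests the third-smallest element:

nums = [7, 2, 9, 4, 1]  # sorted order: [1, 2, 4, 7, 9]
assert kth_number(nums, 3) == 4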
| 92 | """simple docstring"""
from __future__ import annotations
import numpy as np
def relu( vector : list[float] ):
'''simple docstring'''
return np.maximum(0 , vector )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 197 | 0 |
"""simple docstring"""
import math
class SelfOrganizingMap :
def get_winner( self ,weights ,sample ) -> int:
'''simple docstring'''
da = 0.0
db = 0.0
for i in range(len(sample ) ):
da += math.pow((sample[i] - weights[0][i]) ,2 )
db += math.pow((sample[i] - weights[1][i]) ,2 )
return 0 if da > db else 1
return 0
def update( self ,weights ,sample ,j ,alpha ) -> list[list[int | float]]:
'''simple docstring'''
for i in range(len(weights ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def main():
# Training Examples ( m, n )
training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
self_organizing_map = SelfOrganizingMap()
epochs = 3
alpha = 0.5
for _ in range(epochs ):
for j in range(len(training_samples ) ):
# training sample
sample = training_samples[j]
# Compute the winning vector
winner = self_organizing_map.get_winner(weights , sample )
# Update the winning vector
weights = self_organizing_map.update(weights , sample , winner , alpha )
# classify test sample
sample = [0, 0, 0, 1]
winner = self_organizing_map.get_winner(weights , sample )
# results
print(F'''Clusters that the test sample belongs to : {winner}''' )
print(F'''Weights that have been trained : {weights}''' )
# running the main() function
if __name__ == "__main__":
main()
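A hand check of the winner rule for the first training sample above: the unit whose weight vector has the smaller squared Euclidean distance to the sample wins.

sample = [1, 1, 0, 0]
w0, w1 = [0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]
d0 = sum((s - w) ** 2 for s, w in zip(sample, w0))  # 1.86
d1 = sum((s - w) ** 2 for s, w in zip(sample, w1))  # 0.98
print(0 if d0 < d1 else 1)  # unit 1 wins for this sample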
| 321 | """simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=99 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=50 ,__UpperCamelCase=0.02 ,__UpperCamelCase=True ,__UpperCamelCase=None ,) -> List[str]:
'''simple docstring'''
lowercase_ : Dict = parent
lowercase_ : Tuple = batch_size
lowercase_ : List[Any] = seq_length
lowercase_ : Optional[Any] = is_training
lowercase_ : Any = use_input_mask
lowercase_ : Optional[Any] = vocab_size
lowercase_ : str = hidden_size
lowercase_ : Any = num_hidden_layers
lowercase_ : Dict = num_attention_heads
lowercase_ : Optional[int] = intermediate_size
lowercase_ : Any = hidden_act
lowercase_ : Optional[Any] = hidden_dropout_prob
lowercase_ : str = attention_probs_dropout_prob
lowercase_ : Any = max_position_embeddings
lowercase_ : Optional[Any] = initializer_range
lowercase_ : Union[str, Any] = use_labels
lowercase_ : Union[str, Any] = scope
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase_ : List[str] = None
if self.use_input_mask:
lowercase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase_ : Any = self.get_config()
return config, input_ids, input_mask, token_labels
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return BertGenerationConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,is_decoder=__UpperCamelCase ,initializer_range=self.initializer_range ,)
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
config , input_ids , input_mask , token_labels = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Any:
'''simple docstring'''
lowercase_ : Optional[Any] = BertGenerationEncoder(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase )
lowercase_ : Optional[Any] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Optional[Any] = True
lowercase_ : str = BertGenerationEncoder(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : Union[str, Any] = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,)
lowercase_ : Dict = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,)
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> int:
'''simple docstring'''
lowercase_ : List[str] = True
lowercase_ : Union[str, Any] = True
lowercase_ : int = BertGenerationDecoder(config=__UpperCamelCase ).to(__UpperCamelCase ).eval()
# first forward pass
lowercase_ : str = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,use_cache=__UpperCamelCase ,)
lowercase_ : Dict = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase_ : Union[str, Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
lowercase_ : Dict = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
lowercase_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 )
lowercase_ : Any = torch.cat([input_mask, next_mask] ,dim=-1 )
lowercase_ : int = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0]
lowercase_ : List[Any] = model(
__UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,past_key_values=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0]
# select random slice
lowercase_ : int = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
lowercase_ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase_ : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1e-3 ) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,*__UpperCamelCase ,) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : List[str] = BertGenerationDecoder(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : Dict = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
config , input_ids , input_mask , token_labels = self.prepare_config_and_inputs()
inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
lowercase = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
lowercase = (BertGenerationDecoder,) if is_torch_available() else ()
lowercase = (
{'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder}
if is_torch_available()
else {}
)
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : Optional[Any] = BertGenerationEncoderTester(self )
lowercase_ : Tuple = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs()
config.model_type = 'bert'
self.model_tester.create_and_check_model(config ,input_ids ,input_mask ,token_labels )
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask = self.model_tester.prepare_config_and_inputs_for_decoder()
input_mask = None
self.model_tester.create_and_check_model_as_decoder(
config ,input_ids ,input_mask ,token_labels ,encoder_hidden_states ,encoder_attention_mask ,)
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__UpperCamelCase )
@slow
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : int = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
self.assertIsNotNone(__UpperCamelCase )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : Tuple = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
lowercase_ : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
lowercase_ : Tuple = model(__UpperCamelCase )[0]
lowercase_ : Dict = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape ,__UpperCamelCase )
lowercase_ : str = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : str = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
lowercase_ : Dict = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
lowercase_ : Dict = model(__UpperCamelCase )[0]
lowercase_ : Optional[int] = torch.Size([1, 8, 5_0358] )
self.assertEqual(output.shape ,__UpperCamelCase )
lowercase_ : Dict = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
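The `create_and_check_decoder_model_past_large_inputs` helper above boils down to a cache-equivalence property. A minimal sketch of that property for any causal LM with the standard Hugging Face interface (the helper name is illustrative, not part of the test suite, and the attention mask is omitted for brevity):

import torch

def cached_decoding_matches(model, input_ids, next_tokens, atol=1e-3):
    # full forward pass over the concatenated sequence
    full = model(torch.cat([input_ids, next_tokens], dim=-1)).logits
    # incremental pass reusing the key/value cache from the prefix
    past = model(input_ids, use_cache=True).past_key_values
    incremental = model(next_tokens, past_key_values=past).logits
    return torch.allclose(full[:, -next_tokens.shape[1] :], incremental, atol=atol)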
| 321 | 1 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
a : int = logging.get_logger(__name__)
def lowerCAmelCase_ (lowerCAmelCase__: Tuple ):
"""simple docstring"""
UpperCAmelCase_: int = MobileNetVaConfig(layer_norm_eps=0.001 )
if "_quant" in model_name:
raise ValueError("""Quantized models are not supported.""" )
UpperCAmelCase_: Optional[int] = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""" , lowerCAmelCase__ )
if matches:
UpperCAmelCase_: List[str] = float(matches[1] )
UpperCAmelCase_: List[Any] = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
UpperCAmelCase_: Dict = 1_0_0_1
UpperCAmelCase_: int = 'imagenet-1k-id2label.json'
UpperCAmelCase_: Optional[Any] = 'huggingface/label-files'
UpperCAmelCase_: Dict = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type="""dataset""" ) , """r""" ) )
UpperCAmelCase_: Optional[int] = {int(lowerCAmelCase__ ) + 1: v for k, v in idalabel.items()}
UpperCAmelCase_: Tuple = 'background'
UpperCAmelCase_: List[Any] = idalabel
UpperCAmelCase_: str = {v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase_ ():
"""simple docstring"""
UpperCAmelCase_: List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCAmelCase_: List[Any] = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ (lowerCAmelCase__: List[Any] , lowerCAmelCase__: int , lowerCAmelCase__: int , lowerCAmelCase__: List[str]=False ):
"""simple docstring"""
UpperCAmelCase_: Any = get_mobilenet_va_config(lowerCAmelCase__ )
# Load 🤗 model
UpperCAmelCase_: Union[str, Any] = MobileNetVaForImageClassification(lowerCAmelCase__ ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
UpperCAmelCase_: List[Any] = MobileNetVaImageProcessor(
crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 3_2} , )
UpperCAmelCase_: List[str] = image_processor(images=prepare_img() , return_tensors="""pt""" )
UpperCAmelCase_: Optional[Any] = model(**lowerCAmelCase__ )
UpperCAmelCase_: Optional[Any] = outputs.logits
assert logits.shape == (1, 1_0_0_1)
if model_name == "mobilenet_v1_1.0_224":
UpperCAmelCase_: Union[str, Any] = torch.tensor([-4.1739, -1.1233, 3.1205] )
elif model_name == "mobilenet_v1_0.75_192":
UpperCAmelCase_: int = torch.tensor([-3.9440, -2.3141, -0.3333] )
else:
UpperCAmelCase_: Union[str, Any] = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , lowerCAmelCase__ , atol=1e-4 )
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCAmelCase__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print("""Pushing to the hub...""" )
UpperCAmelCase_: Optional[Any] = 'google/' + model_name
image_processor.push_to_hub(lowerCAmelCase__ )
model.push_to_hub(lowerCAmelCase__ )
if __name__ == "__main__":
a : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='mobilenet_v1_1.0_224',
type=str,
help='Name of the MobileNetV1 model you\'d like to convert. Should be in the form \'mobilenet_v1_<depth>_<size>\'.',
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
a : List[str] = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
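An illustrative invocation of the conversion script above (the file name and paths are placeholders):

# python convert_mobilenet_v1_to_pytorch.py \
#     --model_name mobilenet_v1_1.0_224 \
#     --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#     --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_hf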
| 147 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Any = {'vocab_file': 'spiece.model'}
lowerCAmelCase : Tuple = {
'vocab_file': {
'bert_for_seq_generation': (
'https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'
),
}
}
lowerCAmelCase : Optional[int] = {'bert_for_seq_generation': 5_12}
class _A ( __magic_name__):
SCREAMING_SNAKE_CASE : Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : List[int] = []
SCREAMING_SNAKE_CASE : Dict = ['''input_ids''', '''attention_mask''']
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<::::>" , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **_SCREAMING_SNAKE_CASE , )
SCREAMING_SNAKE_CASE_ : List[str] = vocab_file
SCREAMING_SNAKE_CASE_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_SCREAMING_SNAKE_CASE )
@property
def UpperCAmelCase ( self ):
"""simple docstring"""
return self.sp_model.get_piece_size()
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ : List[Any] = None
return state
def __setstate__( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
SCREAMING_SNAKE_CASE_ : Dict = {}
SCREAMING_SNAKE_CASE_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.sp_model.piece_to_id(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.sp_model.IdToPiece(_SCREAMING_SNAKE_CASE )
return token
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
SCREAMING_SNAKE_CASE_ : Optional[int] = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE ) + token
SCREAMING_SNAKE_CASE_ : Optional[int] = []
else:
current_sub_tokens.append(_SCREAMING_SNAKE_CASE )
out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE )
return out_string.strip()
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(_SCREAMING_SNAKE_CASE , 'wb' ) as fi:
SCREAMING_SNAKE_CASE_ : List[Any] = self.sp_model.serialized_model_proto()
fi.write(_SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
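A minimal sketch of the raw SentencePiece calls this tokenizer wraps, using only the API shown above (the model path is a placeholder):

import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load("spiece.model")
pieces = sp.encode("Hello world", out_type=str)  # text -> subword pieces
ids = [sp.piece_to_id(p) for p in pieces]        # pieces -> vocabulary ids
text = sp.decode_pieces(pieces)                  # pieces -> text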
| 253 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCAmelCase ( __a , __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Dict = StableUnCLIPImgaImgPipeline
_A : str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
_A : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_A : Optional[int] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_A : Union[str, Any] = frozenset([] )
def lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase : Dict = 32
__lowercase : int = embedder_hidden_size
# image encoding components
__lowercase : Dict = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
__lowercase : Tuple = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=__a , projection_dim=__a , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
__lowercase : Tuple = StableUnCLIPImageNormalizer(embedding_dim=__a )
__lowercase : Union[str, Any] = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
__lowercase : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
__lowercase : Union[str, Any] = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__a , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
__lowercase : List[str] = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__a , layers_per_block=1 , upcast_attention=__a , use_linear_projection=__a , )
torch.manual_seed(0 )
__lowercase : int = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.00085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=__a , steps_offset=1 , )
torch.manual_seed(0 )
__lowercase : Union[str, Any] = AutoencoderKL()
__lowercase : Tuple = {
# image encoding components
"""feature_extractor""": feature_extractor,
"""image_encoder""": image_encoder.eval(),
# image noising components
"""image_normalizer""": image_normalizer.eval(),
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder.eval(),
"""unet""": unet.eval(),
"""scheduler""": scheduler,
"""vae""": vae.eval(),
}
return components
def lowerCAmelCase ( self : List[Any] , __a : int , __a : List[str]=0 , __a : Union[str, Any]=True ) -> Union[str, Any]:
"""simple docstring"""
if str(__a ).startswith("""mps""" ):
__lowercase : Optional[int] = torch.manual_seed(__a )
else:
__lowercase : List[Any] = torch.Generator(device=__a ).manual_seed(__a )
__lowercase : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
if pil_image:
__lowercase : Optional[Any] = input_image * 0.5 + 0.5
__lowercase : List[str] = input_image.clamp(0 , 1 )
__lowercase : Any = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__lowercase : Optional[Any] = DiffusionPipeline.numpy_to_pil(__a )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
__lowercase : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowercase : int = self.get_dummy_components()
__lowercase : int = StableUnCLIPImgaImgPipeline(**__a )
__lowercase : Optional[int] = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
__lowercase : Optional[Any] = self.get_dummy_inputs(__a )
inputs.update({"""image_embeds""": None} )
__lowercase : Union[str, Any] = sd_pipe(**__a ).images
__lowercase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowercase : Union[str, Any] = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : str = torch_device in ["""cpu""", """mps"""]
self._test_attention_slicing_forward_pass(test_max_difference=__a )
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : Union[str, Any] = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=__a )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__a )
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
__lowercase : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
__lowercase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy""" )
__lowercase : Any = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-l-img2img""" , torch_dtype=torch.floataa )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowercase : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowercase : List[Any] = pipe(__a , """anime turtle""" , generator=__a , output_type="""np""" )
__lowercase : Union[str, Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__a , __a )
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
__lowercase : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy""" )
__lowercase : Dict = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowercase : str = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowercase : Union[str, Any] = pipe(__a , """anime turtle""" , generator=__a , output_type="""np""" )
__lowercase : Any = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__a , __a )
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowercase : List[str] = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
__lowercase : List[Any] = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowercase : Dict = pipe(
__a , """anime turtle""" , num_inference_steps=2 , output_type="""np""" , )
__lowercase : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9 | 306 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : Optional[Any] = (DPMSolverSDEScheduler,)
_A : Dict = 10
def lowerCAmelCase ( self : Optional[int] , **__a : Dict ) -> Optional[int]:
"""simple docstring"""
config = {
"""num_train_timesteps""": 1100,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""noise_sampler_seed""": 0,
}
config.update(**__a )
return config
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=__a )
def lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=__a , beta_end=__a )
def lowerCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__a )
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__a )
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[int] = self.scheduler_classes[0]
__lowercase : List[str] = self.get_scheduler_config()
__lowercase : Any = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
__lowercase : Optional[Any] = self.dummy_model()
__lowercase : str = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowercase : Optional[Any] = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
__lowercase : Union[str, Any] = scheduler.scale_model_input(__a , __a )
__lowercase : Optional[Any] = model(__a , __a )
__lowercase : Optional[Any] = scheduler.step(__a , __a , __a )
__lowercase : str = output.prev_sample
__lowercase : Optional[Any] = torch.sum(torch.abs(__a ) )
__lowercase : Union[str, Any] = torch.mean(torch.abs(__a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47821044921875 ) < 1E-2
assert abs(result_mean.item() - 0.2178705964565277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59352111816406 ) < 1E-2
assert abs(result_mean.item() - 0.22342906892299652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3
def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase : Tuple = self.scheduler_classes[0]
__lowercase : Dict = self.get_scheduler_config(prediction_type="""v_prediction""" )
__lowercase : int = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
__lowercase : Optional[int] = self.dummy_model()
__lowercase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowercase : Dict = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
__lowercase : Dict = scheduler.scale_model_input(__a , __a )
__lowercase : Optional[int] = model(__a , __a )
__lowercase : Optional[int] = scheduler.step(__a , __a , __a )
__lowercase : int = output.prev_sample
__lowercase : Optional[Any] = torch.sum(torch.abs(__a ) )
__lowercase : List[str] = torch.mean(torch.abs(__a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77149200439453 ) < 1E-2
assert abs(result_mean.item() - 0.16226289014816284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1663360595703 ) < 1E-2
assert abs(result_mean.item() - 0.16688326001167297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8487548828125 ) < 1E-2
assert abs(result_mean.item() - 0.1560530662536621 ) < 1E-3
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Tuple = self.scheduler_classes[0]
__lowercase : Dict = self.get_scheduler_config()
__lowercase : Optional[int] = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps , device=__a )
__lowercase : int = self.dummy_model()
__lowercase : Optional[Any] = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
__lowercase : int = scheduler.scale_model_input(__a , __a )
__lowercase : List[str] = model(__a , __a )
__lowercase : List[str] = scheduler.step(__a , __a , __a )
__lowercase : int = output.prev_sample
__lowercase : List[Any] = torch.sum(torch.abs(__a ) )
__lowercase : Optional[Any] = torch.mean(torch.abs(__a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46957397460938 ) < 1E-2
assert abs(result_mean.item() - 0.21805934607982635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59353637695312 ) < 1E-2
assert abs(result_mean.item() - 0.22342908382415771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase : str = self.scheduler_classes[0]
__lowercase : List[Any] = self.get_scheduler_config()
__lowercase : Tuple = scheduler_class(**__a , use_karras_sigmas=__a )
scheduler.set_timesteps(self.num_inference_steps , device=__a )
__lowercase : List[str] = self.dummy_model()
__lowercase : Optional[int] = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma
__lowercase : str = sample.to(__a )
for t in scheduler.timesteps:
__lowercase : List[Any] = scheduler.scale_model_input(__a , __a )
__lowercase : Optional[Any] = model(__a , __a )
__lowercase : Any = scheduler.step(__a , __a , __a )
__lowercase : Optional[Any] = output.prev_sample
__lowercase : Any = torch.sum(torch.abs(__a ) )
__lowercase : Optional[Any] = torch.mean(torch.abs(__a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66974135742188 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63653564453125 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3135223388672 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2 | 306 | 1 |
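Distilled from the tests above, the scheduler's inference contract is: scale the starting sample by init_noise_sigma, then alternate scale_model_input and step. A minimal sketch (torchsde must be installed for this scheduler; the zero tensor is a stand-in for a real UNet prediction):

import torch
from diffusers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma  # start from scaled noise
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample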
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
"""simple docstring"""
snake_case = 0
snake_case = [0]
snake_case = [0]
snake_case = len(lowerCAmelCase )
self.assertEqual(k.knapsack(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , 0 )
snake_case = [60]
snake_case = [10]
snake_case = len(lowerCAmelCase )
self.assertEqual(k.knapsack(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , 0 )
def snake_case ( self ):
"""simple docstring"""
snake_case = 3
snake_case = [1, 2, 3]
snake_case = [3, 2, 1]
snake_case = len(lowerCAmelCase )
self.assertEqual(k.knapsack(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , 5 )
def snake_case ( self ):
"""simple docstring"""
snake_case = 50
snake_case = [60, 1_00, 1_20]
snake_case = [10, 20, 30]
snake_case = len(lowerCAmelCase )
self.assertEqual(k.knapsack(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , 2_20 )
if __name__ == "__main__":
unittest.main()
| 150 | """simple docstring"""
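The knapsack module under test is not shown; a minimal sketch consistent with the call signature and expected values above (capacity, weights, values, item count) is the classic O(n * capacity) dynamic programme:

def knapsack(capacity, weights, values, counter):
    # dp[i][w] = best value using the first i items with capacity w remaining
    dp = [[0] * (capacity + 1) for _ in range(counter + 1)]
    for i in range(1, counter + 1):
        for w in range(capacity + 1):
            dp[i][w] = dp[i - 1][w]
            if weights[i - 1] <= w:
                dp[i][w] = max(dp[i][w], values[i - 1] + dp[i - 1][w - weights[i - 1]])
    return dp[counter][capacity]

assert knapsack(50, [10, 20, 30], [60, 100, 120], 3) == 220  # matches the last test above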
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
SCREAMING_SNAKE_CASE__ = HfArgumentParser(InitializationArguments)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_inverse_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
SCREAMING_SNAKE_CASE__ = {
"vocab_size": len(tokenizer),
"scale_attn_by_inverse_layer_idx": True,
"reorder_and_upcast_attn": True,
}
# Load model config (GPT-2 large in this case)
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
SCREAMING_SNAKE_CASE__ = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 150 | 1 |
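Note the distinction the script relies on: from_config builds a randomly initialised model of the right shape without downloading weights, whereas from_pretrained would also load them. A small sketch (model name illustrative):

from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("gpt2", vocab_size=50_000)  # extra kwargs override config fields
model = AutoModelForCausalLM.from_config(config)                # random init, no weights downloaded
print(f"{model.num_parameters():,} freshly initialised parameters")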
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
_lowerCAmelCase : str = logging.get_logger(__name__)
def UpperCamelCase_( _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : str , _snake_case : Any=False ):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
            'Loading a PyTorch model in Flax requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
if not is_sharded:
__a =os.path.abspath(_snake_case )
logger.info(F'Loading PyTorch weights from {pt_path}' )
__a =torch.load(_snake_case , map_location='cpu' )
logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
__a =convert_pytorch_state_dict_to_flax(_snake_case , _snake_case )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
__a =convert_pytorch_sharded_state_dict_to_flax(_snake_case , _snake_case )
return flax_state_dict
def UpperCamelCase_( _snake_case : Tuple[str] , _snake_case : np.ndarray , _snake_case : Dict[str, jnp.ndarray] , _snake_case : str , ):
"""simple docstring"""
    def is_key_or_prefix_key_in_dict(key : Tuple[str] ) -> bool:
        return len(set(random_flax_state_dict ) & {key, (model_prefix,) + key} ) > 0
# layer norm
__a =pt_tuple_key[:-1] + ('scale',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_snake_case ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
__a =pt_tuple_key[:-1] + ('mean',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_snake_case ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
__a =pt_tuple_key[:-1] + ('var',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_snake_case ):
return renamed_pt_tuple_key, pt_tensor
# embedding
__a =pt_tuple_key[:-1] + ('embedding',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_snake_case ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
__a =pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_snake_case ):
__a =pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
__a =pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_snake_case ):
__a =pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
__a =pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
__a =pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
__a =None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
__a =pt_tuple_key[-2] + '_g'
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
__a =pt_tuple_key[-2] + '_v'
if name is not None:
__a =pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def UpperCamelCase_( _snake_case : Dict , _snake_case : List[Any] ):
"""simple docstring"""
__a ={k: v.numpy() for k, v in pt_state_dict.items()}
__a =flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
__a =flax_model.params['params']
else:
__a =flax_model.params
__a =flatten_dict(_snake_case )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__a =flatten_dict(flax_model.params['batch_stats'] )
random_flax_state_dict.update(_snake_case )
__a ={}
__a =(model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
__a =(model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__a =tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
__a =pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__a =pt_tuple_key[1:]
# Correctly rename weight parameters
__a , __a =rename_key_and_reshape_tensor(
_snake_case , _snake_case , _snake_case , _snake_case )
# add model prefix if necessary
__a =(model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__a =(model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
__a =jnp.asarray(_snake_case )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_snake_case , _snake_case )
continue
# also add unexpected weight so that warning is thrown
__a =jnp.asarray(_snake_case )
else:
# also add unexpected weight so that warning is thrown
__a =jnp.asarray(_snake_case )
return unflatten_dict(_snake_case )
def UpperCamelCase_( _snake_case : int , _snake_case : str ):
"""simple docstring"""
import torch
# Load the index
__a ={}
for shard_file in shard_filenames:
# load using msgpack utils
__a =torch.load(_snake_case )
__a ={k: v.numpy() for k, v in pt_state_dict.items()}
__a =flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__a =flax_model.params['params']
__a =flatten_dict(_snake_case )
random_flax_state_dict.update(flatten_dict(flax_model.params['batch_stats'] ) )
else:
__a =flax_model.params
__a =flatten_dict(_snake_case )
__a =(model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
__a =(model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__a =tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
__a =pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__a =pt_tuple_key[1:]
# Correctly rename weight parameters
__a , __a =rename_key_and_reshape_tensor(
_snake_case , _snake_case , _snake_case , _snake_case )
# add model prefix if necessary
__a =(model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__a =(model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
__a =jnp.asarray(_snake_case )
continue
if "var" in flax_key[-1]:
__a =jnp.asarray(_snake_case )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_snake_case , _snake_case )
continue
# also add unexpected weight so that warning is thrown
__a =jnp.asarray(_snake_case )
else:
# also add unexpected weight so that warning is thrown
__a =jnp.asarray(_snake_case )
return unflatten_dict(_snake_case )
def UpperCamelCase_( _snake_case : Union[str, Any] , _snake_case : Optional[Any] ):
"""simple docstring"""
__a =os.path.abspath(_snake_case )
logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
    __a =getattr(transformers , 'Flax' + model.__class__.__name__ )
# load flax weight dict
with open(_snake_case , 'rb' ) as state_f:
try:
__a =from_bytes(_snake_case , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(_snake_case , _snake_case )
def UpperCamelCase_( _snake_case : Tuple , _snake_case : Dict ):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
            'Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
    __a =flatten_dict(jax.tree_util.tree_map(lambda _snake_case : _snake_case.dtype == jnp.bfloataa , _snake_case ) ).values()
if any(_snake_case ):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PyTorch yet.
        logger.warning(
            'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
            'before loading them into the PyTorch model.' )
__a =jax.tree_util.tree_map(
            lambda _snake_case : _snake_case.astype(np.floataa ) if _snake_case.dtype == jnp.bfloataa else _snake_case , _snake_case )
__a =flatten_dict(_snake_case )
__a =pt_model.state_dict()
__a =(pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
__a =(pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
__a =[]
__a =set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
__a =flax_key_tuple[0] == pt_model.base_model_prefix
__a ='.'.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
__a =flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
__a =(pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_snake_case ) not in pt_model_dict:
# conv layer
__a =flax_key_tuple[:-1] + ('weight',)
__a =jnp.transpose(_snake_case , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_snake_case ) not in pt_model_dict:
# linear layer
__a =flax_key_tuple[:-1] + ('weight',)
__a =flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
__a =flax_key_tuple[:-1] + ('weight',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
__a =flax_key_tuple[:-1] + ('running_mean',)
elif "var" in flax_key_tuple[-1]:
__a =flax_key_tuple[:-1] + ('running_var',)
if "batch_stats" in flax_state:
__a ='.'.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
__a ='.'.join(_snake_case )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
__a ={}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
__a =key.split('.' )
__a =None
if key_components[-3::2] == ["parametrizations", "original0"]:
__a =key_components[-2] + '_g'
elif key_components[-3::2] == ["parametrizations", "original1"]:
__a =key_components[-2] + '_v'
if name is not None:
__a =key_components[:-3] + [name]
__a ='.'.join(_snake_case )
__a =key
if flax_key in special_pt_names:
__a =special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
__a =np.asarray(_snake_case ) if not isinstance(_snake_case , np.ndarray ) else flax_tensor
__a =torch.from_numpy(_snake_case )
# remove from missing keys
missing_keys.remove(_snake_case )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_snake_case )
pt_model.load_state_dict(_snake_case )
# re-transform missing_keys to list
__a =list(_snake_case )
if len(_snake_case ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(_snake_case ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
' use it for predictions and inference.' )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
'If your task is similar to the task the model of the checkpoint was trained on, '
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
| 367 |
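The converters above reduce to two tensor-layout rules, sketched here in plain numpy: a PyTorch Linear weight is (out, in) while a Flax Dense kernel is (in, out), and a PyTorch Conv2d weight is (out, in, kh, kw) while Flax expects (kh, kw, in, out):

import numpy as np

pt_linear = np.zeros((8, 4))               # (out_features, in_features)
flax_kernel = pt_linear.T                  # (in_features, out_features)
assert flax_kernel.shape == (4, 8)

pt_conv = np.zeros((16, 3, 5, 5))          # (out, in, kh, kw)
flax_conv = pt_conv.transpose(2, 3, 1, 0)  # (kh, kw, in, out) -- the (2, 3, 1, 0) used above
assert flax_conv.shape == (5, 5, 3, 16)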
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =[]
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=__snake_case , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__a =resnets
__a =attentions
if self.add_downsample:
__a =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> Optional[Any]:
'''simple docstring'''
__a =()
for resnet, attn in zip(self.resnets , self.attentions ):
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
__a =attn(__snake_case , __snake_case , deterministic=__snake_case )
output_states += (hidden_states,)
if self.add_downsample:
__a =self.downsamplers_a(__snake_case )
output_states += (hidden_states,)
return hidden_states, output_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=__snake_case , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =resnets
if self.add_downsample:
__a =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case=True ) -> Optional[int]:
'''simple docstring'''
__a =()
for resnet in self.resnets:
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
output_states += (hidden_states,)
if self.add_downsample:
__a =self.downsamplers_a(__snake_case )
output_states += (hidden_states,)
return hidden_states, output_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =[]
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if (i == self.num_layers - 1) else self.out_channels
__a =self.prev_output_channel if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__a =resnets
__a =attentions
if self.add_upsample:
__a =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> List[Any]:
'''simple docstring'''
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
__a =res_hidden_states_tuple[-1]
__a =res_hidden_states_tuple[:-1]
__a =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
__a =attn(__snake_case , __snake_case , deterministic=__snake_case )
if self.add_upsample:
__a =self.upsamplers_a(__snake_case )
return hidden_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if (i == self.num_layers - 1) else self.out_channels
__a =self.prev_output_channel if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =resnets
if self.add_upsample:
__a =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> List[Any]:
'''simple docstring'''
for resnet in self.resnets:
# pop res hidden states
__a =res_hidden_states_tuple[-1]
__a =res_hidden_states_tuple[:-1]
__a =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
if self.add_upsample:
__a =self.upsamplers_a(__snake_case )
return hidden_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
# there is always at least one resnet
__a =[
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
__a =[]
for _ in range(self.num_layers ):
__a =FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__a =FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =resnets
__a =attentions
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> List[str]:
'''simple docstring'''
__a =self.resnets[0](__snake_case , __snake_case )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
__a =attn(__snake_case , __snake_case , deterministic=__snake_case )
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
return hidden_states
| 308 | 0 |
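The up blocks above consume skip connections by popping the most recent down-block activation and concatenating it on the channel axis — the last axis, since Flax convolutions are NHWC. The same operation in isolation:

import jax.numpy as jnp

hidden = jnp.zeros((1, 8, 8, 64))                    # NHWC
res_stack = (jnp.zeros((1, 8, 8, 32)), jnp.zeros((1, 8, 8, 64)))
res = res_stack[-1]                                  # pop the most recent skip
res_stack = res_stack[:-1]
hidden = jnp.concatenate((hidden, res), axis=-1)     # channels: 64 + 64
assert hidden.shape == (1, 8, 8, 128)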
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : Optional[Any] ,_lowerCamelCase : List[Any]=False ) -> str:
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) and isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCAmelCase : List[str] = len(set_a.intersection(__lowerCAmelCase ) )
if alternative_union:
_lowerCAmelCase : Optional[int] = len(__lowerCAmelCase ) + len(__lowerCAmelCase )
else:
_lowerCAmelCase : Dict = len(set_a.union(__lowerCAmelCase ) )
return intersection / union
if isinstance(__lowerCAmelCase ,(list, tuple) ) and isinstance(__lowerCAmelCase ,(list, tuple) ):
_lowerCAmelCase : Optional[Any] = [element for element in set_a if element in set_b]
if alternative_union:
_lowerCAmelCase : Dict = len(__lowerCAmelCase ) + len(__lowerCAmelCase )
return len(__lowerCAmelCase ) / union
else:
_lowerCAmelCase : Optional[Any] = set_a + [element for element in set_b if element not in set_a]
return len(__lowerCAmelCase ) / len(__lowerCAmelCase )
return len(__lowerCAmelCase ) / len(__lowerCAmelCase )
return None
if __name__ == "__main__":
_a : Tuple = {'a', 'b', 'c', 'd', 'e'}
_a : Optional[Any] = {'c', 'd', 'e', 'f', 'h', 'i'}
print(jaccard_similarity(set_a, set_b))
| 44 |
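Worked through by hand for the sets in the __main__ block: the intersection {c, d, e} has 3 elements and the union has 8, so the similarity is 3 / 8 = 0.375:

set_a = {"a", "b", "c", "d", "e"}
set_b = {"c", "d", "e", "f", "h", "i"}
assert len(set_a & set_b) / len(set_a | set_b) == 3 / 8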
A__ = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
A__ = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
A__ = {
0: '''Sunday''',
1: '''Monday''',
2: '''Tuesday''',
3: '''Wednesday''',
4: '''Thursday''',
5: '''Friday''',
6: '''Saturday''',
}
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str:
"""simple docstring"""
assert len(str(__lowerCAmelCase ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
snake_case__ : Optional[int] = year // 100
snake_case__ : List[str] = (5 * (century % 4) + 2) % 7
snake_case__ : Dict = year % 100
snake_case__ : Union[str, Any] = centurian % 12
snake_case__ : List[Any] = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
snake_case__ : List[str] = (
DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
else DOOMSDAY_LEAP[month - 1]
)
snake_case__ : List[str] = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 230 | 0 |
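A quick way to validate the doomsday computation is to cross-check it against the standard library, remapping datetime's Monday == 0 convention onto the Sunday == 0 scheme used above:

import datetime

WEEK_DAYS = ("Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday")

def weekday_via_stdlib(year, month, day):
    # datetime.weekday(): Monday == 0 ... Sunday == 6; shift by one to get Sunday == 0
    return WEEK_DAYS[(datetime.date(year, month, day).weekday() + 1) % 7]

assert weekday_via_stdlib(2000, 1, 1) == "Saturday"  # 1 January 2000 was a Saturday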
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCamelCase_ : List[Any] = logging.get_logger(__name__)
def __a ( _UpperCamelCase: Optional[int] , _UpperCamelCase: Dict , _UpperCamelCase: List[str] ) -> Dict:
"""simple docstring"""
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
def __a ( _UpperCamelCase: np.ndarray , _UpperCamelCase: Optional[str] , _UpperCamelCase: Optional[str] = None ) -> Any:
"""simple docstring"""
_snake_case = tesseract_config if tesseract_config is not None else ""
# apply OCR
_snake_case = to_pil_image(_UpperCamelCase )
_snake_case , _snake_case = pil_image.size
_snake_case = pytesseract.image_to_data(_UpperCamelCase , lang=_UpperCamelCase , output_type="dict" , config=_UpperCamelCase )
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case = data["text"], data["left"], data["top"], data["width"], data["height"]
# filter empty words and corresponding coordinates
_snake_case = [idx for idx, word in enumerate(_UpperCamelCase ) if not word.strip()]
_snake_case = [word for idx, word in enumerate(_UpperCamelCase ) if idx not in irrelevant_indices]
_snake_case = [coord for idx, coord in enumerate(_UpperCamelCase ) if idx not in irrelevant_indices]
_snake_case = [coord for idx, coord in enumerate(_UpperCamelCase ) if idx not in irrelevant_indices]
_snake_case = [coord for idx, coord in enumerate(_UpperCamelCase ) if idx not in irrelevant_indices]
_snake_case = [coord for idx, coord in enumerate(_UpperCamelCase ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
_snake_case = []
for x, y, w, h in zip(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_snake_case = [x, y, x + w, y + h]
actual_boxes.append(_UpperCamelCase )
# finally, normalize the bounding boxes
_snake_case = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
assert len(_UpperCamelCase ) == len(_UpperCamelCase ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class _a ( __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Tuple = ["""pixel_values"""]
def __init__( self ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = "" ,**_SCREAMING_SNAKE_CASE ,) -> None:
super().__init__(**_SCREAMING_SNAKE_CASE )
_snake_case = size if size is not None else {"height": 224, "width": 224}
_snake_case = get_size_dict(_SCREAMING_SNAKE_CASE )
_snake_case = do_resize
_snake_case = size
_snake_case = resample
_snake_case = apply_ocr
_snake_case = ocr_lang
_snake_case = tesseract_config
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> np.ndarray:
_snake_case = get_size_dict(_SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_snake_case = (size["height"], size["width"])
return resize(_SCREAMING_SNAKE_CASE ,size=_SCREAMING_SNAKE_CASE ,resample=_SCREAMING_SNAKE_CASE ,data_format=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = ChannelDimension.FIRST ,**_SCREAMING_SNAKE_CASE ,) -> PIL.Image.Image:
_snake_case = do_resize if do_resize is not None else self.do_resize
_snake_case = size if size is not None else self.size
_snake_case = get_size_dict(_SCREAMING_SNAKE_CASE )
_snake_case = resample if resample is not None else self.resample
_snake_case = apply_ocr if apply_ocr is not None else self.apply_ocr
_snake_case = ocr_lang if ocr_lang is not None else self.ocr_lang
_snake_case = tesseract_config if tesseract_config is not None else self.tesseract_config
_snake_case = make_list_of_images(_SCREAMING_SNAKE_CASE )
if not valid_images(_SCREAMING_SNAKE_CASE ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
# All transformations expect numpy arrays.
_snake_case = [to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images]
if apply_ocr:
requires_backends(self ,"pytesseract" )
_snake_case = []
_snake_case = []
for image in images:
_snake_case , _snake_case = apply_tesseract(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
words_batch.append(_SCREAMING_SNAKE_CASE )
boxes_batch.append(_SCREAMING_SNAKE_CASE )
if do_resize:
_snake_case = [self.resize(image=_SCREAMING_SNAKE_CASE ,size=_SCREAMING_SNAKE_CASE ,resample=_SCREAMING_SNAKE_CASE ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
_snake_case = [flip_channel_order(_SCREAMING_SNAKE_CASE ) for image in images]
_snake_case = [to_channel_dimension_format(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) for image in images]
_snake_case = BatchFeature(data={"pixel_values": images} ,tensor_type=_SCREAMING_SNAKE_CASE )
if apply_ocr:
_snake_case = words_batch
_snake_case = boxes_batch
return data
| 142 |
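A worked example of the 0-1000 box normalisation above: a pixel box (left, top, right, bottom) on a page 300 pixels wide and 600 pixels tall:

def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]

assert normalize_box([15, 30, 45, 60], 300, 600) == [50, 50, 150, 100]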
'''simple docstring'''
import pprint
import requests
UpperCamelCase_ : Tuple = '''https://zenquotes.io/api'''
def __a ( ) -> list:
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + "/today" ).json()
def __a ( ) -> list:
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + "/random" ).json()
if __name__ == "__main__":
UpperCamelCase_ : Any = random_quotes()
pprint.pprint(response)
| 142 | 1 |
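Usage sketch, assuming the documented zenquotes response shape (a list of dicts with "q" for the quote text and "a" for the author):

quotes = random_quotes()
print(quotes[0]["q"], "-", quotes[0]["a"])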
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _a ( unittest.TestCase ):
def __snake_case (self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __snake_case (self ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_: Any = UNetaDModel(
sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("""AttnDownBlock2D""", """DownBlock2D"""), up_block_types=("""UpBlock2D""", """AttnUpBlock2D"""), )
return model
@property
def __snake_case (self ) -> List[Any]:
torch.manual_seed(0 )
UpperCAmelCase_: Optional[int] = UNetaDConditionModel(
sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D"""), up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D"""), cross_attention_dim=10, )
return model
@property
def __snake_case (self ) -> Union[str, Any]:
torch.manual_seed(0 )
UpperCAmelCase_: Optional[int] = AutoencoderKL(
sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D"""), up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D"""), )
UpperCAmelCase_: Optional[Any] = UNetaDModel(
sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("""AttnDownBlock2D""", """DownBlock2D"""), up_block_types=("""UpBlock2D""", """AttnUpBlock2D"""), )
return vqvae, unet
@slow
def __snake_case (self ) -> Optional[Any]:
UpperCAmelCase_: str = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_: str = Mel(
x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0], )
UpperCAmelCase_: Tuple = DDPMScheduler()
UpperCAmelCase_: List[Any] = AudioDiffusionPipeline(vqvae=SCREAMING_SNAKE_CASE_, unet=self.dummy_unet, mel=SCREAMING_SNAKE_CASE_, scheduler=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: int = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
UpperCAmelCase_: str = pipe(generator=SCREAMING_SNAKE_CASE_, steps=4 )
UpperCAmelCase_: Optional[Any] = output.audios[0]
UpperCAmelCase_: Optional[int] = output.images[0]
UpperCAmelCase_: Dict = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
UpperCAmelCase_: Dict = pipe(generator=SCREAMING_SNAKE_CASE_, steps=4, return_dict=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Any = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
UpperCAmelCase_: Optional[Any] = np.frombuffer(image.tobytes(), dtype="""uint8""" )[:10]
UpperCAmelCase_: List[Any] = np.frombuffer(image_from_tuple.tobytes(), dtype="""uint8""" )[:10]
UpperCAmelCase_: Dict = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
UpperCAmelCase_: int = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0], )
UpperCAmelCase_: List[str] = DDIMScheduler()
UpperCAmelCase_: int = self.dummy_vqvae_and_unet
UpperCAmelCase_: Dict = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=SCREAMING_SNAKE_CASE_, scheduler=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[str] = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
np.random.seed(0 )
UpperCAmelCase_: Dict = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
UpperCAmelCase_: List[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
UpperCAmelCase_: Dict = pipe(raw_audio=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, start_step=5, steps=10 )
UpperCAmelCase_: Any = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
UpperCAmelCase_: Union[str, Any] = np.frombuffer(image.tobytes(), dtype="""uint8""" )[:10]
UpperCAmelCase_: Union[str, Any] = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
UpperCAmelCase_: Union[str, Any] = self.dummy_unet_condition
UpperCAmelCase_: Union[str, Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0], unet=SCREAMING_SNAKE_CASE_, mel=SCREAMING_SNAKE_CASE_, scheduler=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Any = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
np.random.seed(0 )
UpperCAmelCase_: List[str] = torch.rand((1, 1, 10) )
UpperCAmelCase_: Optional[int] = pipe(generator=SCREAMING_SNAKE_CASE_, encoding=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[str] = output.images[0]
UpperCAmelCase_: Any = np.frombuffer(image.tobytes(), dtype="""uint8""" )[:10]
UpperCAmelCase_: Any = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
def __snake_case (self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case (self ) -> Optional[Any]:
UpperCAmelCase_: str = torch_device
UpperCAmelCase_: Any = DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" )
UpperCAmelCase_: Dict = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Union[str, Any] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
UpperCAmelCase_: Union[str, Any] = pipe(generator=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[Any] = output.audios[0]
UpperCAmelCase_: Optional[Any] = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
UpperCAmelCase_: Optional[Any] = np.frombuffer(image.tobytes(), dtype="""uint8""" )[:10]
UpperCAmelCase_: Any = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 147 |
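These tests get bit-exact reproducibility from explicitly seeded torch.Generator objects rather than the global RNG; the property they rely on, in isolation:

import torch

g1 = torch.Generator(device="cpu").manual_seed(42)
g2 = torch.Generator(device="cpu").manual_seed(42)
assert torch.equal(torch.randn(4, generator=g1), torch.randn(4, generator=g2))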
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class _a :
def __init__(self, SCREAMING_SNAKE_CASE_, ) -> Optional[Any]:
UpperCAmelCase_: Optional[int] = parent
UpperCAmelCase_: List[Any] = 13
UpperCAmelCase_: Union[str, Any] = 7
UpperCAmelCase_: Optional[Any] = True
UpperCAmelCase_: Tuple = True
UpperCAmelCase_: Dict = True
UpperCAmelCase_: str = 99
UpperCAmelCase_: Tuple = 32
UpperCAmelCase_: Optional[int] = 2
UpperCAmelCase_: Union[str, Any] = 4
UpperCAmelCase_: List[Any] = 37
UpperCAmelCase_: str = """gelu"""
UpperCAmelCase_: Dict = 0.1
UpperCAmelCase_: Optional[Any] = 0.1
UpperCAmelCase_: Optional[Any] = 512
UpperCAmelCase_: List[str] = 16
UpperCAmelCase_: Any = 2
UpperCAmelCase_: Union[str, Any] = 0.0_2
UpperCAmelCase_: List[str] = 3
UpperCAmelCase_: Tuple = 4
UpperCAmelCase_: Any = None
def __snake_case (self ) -> str:
UpperCAmelCase_: List[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
UpperCAmelCase_: Optional[int] = None
if self.use_input_mask:
UpperCAmelCase_: Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_: List[Any] = None
UpperCAmelCase_: Optional[int] = None
UpperCAmelCase_: Tuple = None
if self.use_labels:
UpperCAmelCase_: List[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCAmelCase_: int = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
UpperCAmelCase_: List[Any] = ids_tensor([self.batch_size], self.num_choices )
UpperCAmelCase_: List[str] = EsmConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __snake_case (self ) -> Optional[int]:
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
UpperCAmelCase_: Dict = True
UpperCAmelCase_: Dict = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase_: Dict = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Tuple:
UpperCAmelCase_: Optional[int] = TFEsmModel(config=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: str = {"""input_ids""": input_ids, """attention_mask""": input_mask}
UpperCAmelCase_: Dict = model(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Dict = [input_ids, input_mask]
UpperCAmelCase_: Dict = model(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, ) -> List[str]:
UpperCAmelCase_: Tuple = True
UpperCAmelCase_: List[Any] = TFEsmModel(config=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[int] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""encoder_hidden_states""": encoder_hidden_states,
"""encoder_attention_mask""": encoder_attention_mask,
}
UpperCAmelCase_: Optional[Any] = model(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = [input_ids, input_mask]
UpperCAmelCase_: List[Any] = model(SCREAMING_SNAKE_CASE_, encoder_hidden_states=SCREAMING_SNAKE_CASE_ )
# Also check the case where encoder outputs are not passed
UpperCAmelCase_: Tuple = model(SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCAmelCase_: List[Any] = TFEsmForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[int] = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
UpperCAmelCase_: int = self.num_labels
UpperCAmelCase_: Dict = TFEsmForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
UpperCAmelCase_: Tuple = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def __snake_case (self ) -> Any:
UpperCAmelCase_: Dict = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
UpperCAmelCase_: int = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class _a ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
A = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
A = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
A = False
A = False
def __snake_case (self ) -> Tuple:
UpperCAmelCase_: List[Any] = TFEsmModelTester(self )
UpperCAmelCase_: Union[str, Any] = ConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, hidden_size=37 )
def __snake_case (self ) -> Any:
self.config_tester.run_common_tests()
def __snake_case (self ) -> List[str]:
UpperCAmelCase_: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> List[Any]:
UpperCAmelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Tuple:
UpperCAmelCase_: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> List[str]:
UpperCAmelCase_: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def __snake_case (self ) -> str:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_: Dict = TFEsmModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@unittest.skip("""Protein models do not support embedding resizing.""" )
def __snake_case (self ) -> Tuple:
pass
@unittest.skip("""Protein models do not support embedding resizing.""" )
def __snake_case (self ) -> Optional[Any]:
pass
def __snake_case (self ) -> List[Any]:
        UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_: List[Any] = model_class(SCREAMING_SNAKE_CASE_ )
assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
UpperCAmelCase_: Any = model.get_bias()
assert isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
for k, v in name.items():
assert isinstance(SCREAMING_SNAKE_CASE_, tf.Variable )
else:
UpperCAmelCase_: Union[str, Any] = model.get_output_embeddings()
assert x is None
UpperCAmelCase_: Optional[int] = model.get_bias()
assert name is None
@require_tf
class _a ( unittest.TestCase ):
@slow
def __snake_case (self ) -> str:
UpperCAmelCase_: str = TFEsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
UpperCAmelCase_: Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase_: int = model(SCREAMING_SNAKE_CASE_ )[0]
UpperCAmelCase_: Optional[int] = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ), SCREAMING_SNAKE_CASE_ )
# compare the actual values for a slice.
UpperCAmelCase_: List[str] = tf.constant(
[
[
[8.9_2_1_5_1_8, -1_0.5_8_9_8_1_4, -6.4_6_7_1_3_0_7],
[-6.3_9_6_7_1_5_6, -1_3.9_1_1_3_7_7, -1.1_2_1_1_9_1_5],
[-7.7_8_1_2_4_7, -1_3.9_5_1_5_5_7, -3.7_4_0_5_9_2],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1E-2 ) )
@slow
def __snake_case (self ) -> Optional[int]:
UpperCAmelCase_: List[Any] = TFEsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
UpperCAmelCase_: Any = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
UpperCAmelCase_: Optional[int] = model(SCREAMING_SNAKE_CASE_ )[0]
# compare the actual values for a slice.
UpperCAmelCase_: str = tf.constant(
[
[
[0.1_4_4_4_3_0_9_2, 0.5_4_1_2_5_3_2_7, 0.3_2_4_7_7_3_9],
[0.3_0_3_4_0_4_8_4, 0.0_0_5_2_6_6_7_6, 0.3_1_0_7_7_7_2_2],
[0.3_2_2_7_8_0_4_3, -0.2_4_9_8_7_0_9_6, 0.3_4_1_4_6_2_8],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1E-4 ) )
| 147 | 1 |
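The integration tests above compare only a small output slice against hard-coded values, with the absolute tolerance chosen per model; the tolerance semantics in isolation:

import numpy as np

a = np.array([0.14443092, 0.54125327])
b = a + 5e-5
assert np.allclose(a, b, atol=1e-4)       # within tolerance
assert not np.allclose(a, b, atol=1e-6)   # too tight for a 5e-5 difference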
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
def lowerCamelCase_ ( _a , _a=False ):
"""simple docstring"""
lowerCAmelCase__ : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCAmelCase__ : Tuple = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def lowerCamelCase_ ( _a , _a , _a=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
lowerCAmelCase__ : Dict = ''''''
else:
lowerCAmelCase__ : List[str] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase__ : Dict = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
lowerCAmelCase__ : Dict = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase__ : Any = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase__ : Optional[Any] = in_proj_bias[: config.hidden_size]
lowerCAmelCase__ : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase__ : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase__ : Dict = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase__ : Any = in_proj_bias[-config.hidden_size :]
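def _demo_qkv_split():
    # Hypothetical, self-contained sketch (not part of the original script): the
    # helper above slices timm's fused qkv projection into equal query/key/value
    # blocks along dim 0, which is what the index arithmetic expresses.
    hidden = 4
    qkv_w = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
    q_w = qkv_w[:hidden, :]
    k_w = qkv_w[hidden : 2 * hidden, :]
    v_w = qkv_w[-hidden:, :]
    assert q_w.shape == k_w.shape == v_w.shape == (hidden, hidden)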
def lowerCamelCase_ ( _a ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(_a , _a )
def lowerCamelCase_ ( _a , _a , _a ):
"""simple docstring"""
lowerCAmelCase__ : Any = dct.pop(_a )
lowerCAmelCase__ : Optional[Any] = val
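def _demo_rename_key():
    # Self-contained sketch (hypothetical demo, not part of the original script):
    # the pop/assign idiom above moves a value to a new key in place.
    state = {"old": 1}
    state["new"] = state.pop("old")
    assert state == {"new": 1}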
def lowerCamelCase_ ( ):
"""simple docstring"""
lowerCAmelCase__ : Dict = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCAmelCase__ : Tuple = Image.open(requests.get(_a , stream=_a ).raw )
return im
@torch.no_grad()
def lowerCamelCase_ ( _a , _a ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = ViTConfig()
lowerCAmelCase__ : Optional[Any] = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
lowerCAmelCase__ : List[Any] = True
lowerCAmelCase__ : Any = int(vit_name[-12:-10] )
lowerCAmelCase__ : int = int(vit_name[-9:-6] )
else:
lowerCAmelCase__ : Dict = 1_000
lowerCAmelCase__ : str = '''huggingface/label-files'''
lowerCAmelCase__ : Dict = '''imagenet-1k-id2label.json'''
lowerCAmelCase__ : str = json.load(open(hf_hub_download(_a , _a , repo_type='''dataset''' ) , '''r''' ) )
lowerCAmelCase__ : Any = {int(_a ): v for k, v in idalabel.items()}
lowerCAmelCase__ : Optional[Any] = idalabel
lowerCAmelCase__ : List[str] = {v: k for k, v in idalabel.items()}
lowerCAmelCase__ : Tuple = int(vit_name[-6:-4] )
lowerCAmelCase__ : Union[str, Any] = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('''tiny''' ):
lowerCAmelCase__ : List[str] = 192
lowerCAmelCase__ : Tuple = 768
lowerCAmelCase__ : Optional[int] = 12
lowerCAmelCase__ : List[Any] = 3
elif vit_name[9:].startswith('''small''' ):
lowerCAmelCase__ : Any = 384
lowerCAmelCase__ : Optional[int] = 1_536
lowerCAmelCase__ : List[Any] = 12
lowerCAmelCase__ : Tuple = 6
else:
pass
else:
if vit_name[4:].startswith('''small''' ):
lowerCAmelCase__ : List[str] = 768
lowerCAmelCase__ : Tuple = 2_304
lowerCAmelCase__ : Any = 8
lowerCAmelCase__ : Union[str, Any] = 8
elif vit_name[4:].startswith('''base''' ):
pass
elif vit_name[4:].startswith('''large''' ):
lowerCAmelCase__ : str = 1_024
lowerCAmelCase__ : Optional[Any] = 4_096
lowerCAmelCase__ : Optional[int] = 24
lowerCAmelCase__ : Tuple = 16
elif vit_name[4:].startswith('''huge''' ):
lowerCAmelCase__ : Tuple = 1_280
lowerCAmelCase__ : Tuple = 5_120
lowerCAmelCase__ : Optional[int] = 32
lowerCAmelCase__ : List[Any] = 16
# load original model from timm
lowerCAmelCase__ : Tuple = timm.create_model(_a , pretrained=_a )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCAmelCase__ : Any = timm_model.state_dict()
if base_model:
remove_classification_head_(_a )
lowerCAmelCase__ : List[Any] = create_rename_keys(_a , _a )
for src, dest in rename_keys:
rename_key(_a , _a , _a )
read_in_q_k_v(_a , _a , _a )
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowerCAmelCase__ : List[str] = ViTModel(_a ).eval()
else:
lowerCAmelCase__ : Any = ViTForImageClassification(_a ).eval()
model.load_state_dict(_a )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
lowerCAmelCase__ : Dict = DeiTImageProcessor(size=config.image_size )
else:
lowerCAmelCase__ : int = ViTImageProcessor(size=config.image_size )
lowerCAmelCase__ : Optional[Any] = image_processor(images=prepare_img() , return_tensors='''pt''' )
lowerCAmelCase__ : List[str] = encoding['''pixel_values''']
lowerCAmelCase__ : List[Any] = model(_a )
if base_model:
lowerCAmelCase__ : Optional[Any] = timm_model.forward_features(_a )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_a , outputs.pooler_output , atol=1e-3 )
else:
lowerCAmelCase__ : Union[str, Any] = timm_model(_a )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_a , outputs.logits , atol=1e-3 )
Path(_a ).mkdir(exist_ok=_a )
print(f'Saving model {vit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(_a )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_a )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_patch16_224''',
type=str,
help='''Name of the ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
lowerCamelCase = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
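    # Example invocation (script file name and output path are illustrative):
    #   python convert_vit_timm_to_pytorch.py --vit_name vit_base_patch16_224 \
    #       --pytorch_dump_folder_path ./vit-base-patch16-224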
| 211 |
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j
    def add_edge(self, u, v, w):
        self.dp[u][v] = w
    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])
    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
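    # Expected output: 11 (shortest path 1 -> 3 -> 4 = 5 + 6) and 16 (0 -> 2 -> 3 = 9 + 7).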
| 211 | 1 |
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(F'''Total count for various states are: {counts}''')
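    # Expected histogram: X|0> = |1> on both qubits, so every one of the 1_000
    # shots reads '11', i.e. counts == {'11': 1000}.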
| 180 |
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26
def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)
def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26
def benchmark() -> None:
    from timeit import timeit
    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 180 | 1 |
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # A vertex may take `color` only if no adjacent vertex already has it.
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )
def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
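if __name__ == "__main__":
    # Illustrative usage (not in the original module); the 0/1 adjacency-matrix
    # input format is implied by the `neighbour == 1` test above. A path graph
    # 0-1-2 is 2-colorable:
    sample_graph = [
        [0, 1, 0],
        [1, 0, 1],
        [0, 1, 0],
    ]
    print(color(sample_graph, 2))  # -> [0, 1, 0]; an empty list would mean no valid coloring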
| 262 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCAmelCase__ : Union[str, Any] =16
UpperCAmelCase__ : Any =32
def _lowercase ( _UpperCAmelCase , _UpperCAmelCase = 16 ) -> int:
lowerCamelCase =AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowerCamelCase =load_dataset("""glue""" , """mrpc""" )
def tokenize_function(_UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase =tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCamelCase =datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase =tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(_UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCamelCase =1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCamelCase =16
elif accelerator.mixed_precision != "no":
lowerCamelCase =8
else:
lowerCamelCase =None
return tokenizer.pad(
_UpperCAmelCase , padding="""longest""" , max_length=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowerCamelCase =DataLoader(
tokenized_datasets["""train"""] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase )
lowerCamelCase =DataLoader(
tokenized_datasets["""validation"""] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCAmelCase__ : Dict =mocked_dataloaders # noqa: F811
def _lowercase ( _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , _UpperCAmelCase ) == "1":
lowerCamelCase =2
# New Code #
lowerCamelCase =int(args.gradient_accumulation_steps )
lowerCamelCase =int(args.local_sgd_steps )
# Initialize accelerator
lowerCamelCase =Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_UpperCAmelCase )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase =config["""lr"""]
lowerCamelCase =int(config["""num_epochs"""] )
lowerCamelCase =int(config["""seed"""] )
lowerCamelCase =int(config["""batch_size"""] )
lowerCamelCase =evaluate.load("""glue""" , """mrpc""" )
set_seed(_UpperCAmelCase )
lowerCamelCase , lowerCamelCase =get_dataloaders(_UpperCAmelCase , _UpperCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase =AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=_UpperCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase =model.to(accelerator.device )
# Instantiate optimizer
lowerCamelCase =AdamW(params=model.parameters() , lr=_UpperCAmelCase )
# Instantiate scheduler
lowerCamelCase =get_linear_schedule_with_warmup(
optimizer=_UpperCAmelCase , num_warmup_steps=1_00 , num_training_steps=(len(_UpperCAmelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase =accelerator.prepare(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Now we train the model
for epoch in range(_UpperCAmelCase ):
model.train()
with LocalSGD(
accelerator=_UpperCAmelCase , model=_UpperCAmelCase , local_sgd_steps=_UpperCAmelCase , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(_UpperCAmelCase ):
lowerCamelCase =model(**_UpperCAmelCase )
lowerCamelCase =output.loss
accelerator.backward(_UpperCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase =model(**_UpperCAmelCase )
lowerCamelCase =outputs.logits.argmax(dim=-1 )
lowerCamelCase , lowerCamelCase =accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=_UpperCAmelCase , references=_UpperCAmelCase , )
lowerCamelCase =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , _UpperCAmelCase )
def _lowercase ( ) -> Any:
lowerCamelCase =argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=_UpperCAmelCase , default=_UpperCAmelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=_UpperCAmelCase , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument(
"""--local_sgd_steps""" , type=_UpperCAmelCase , default=8 , help="""Number of local SGD steps or None to disable local SGD""" )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
lowerCamelCase =parser.parse_args()
lowerCamelCase ={"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(_UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
main()
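    # Illustrative launch commands (script name is an assumption):
    #   accelerate launch local_sgd.py --gradient_accumulation_steps 2 --local_sgd_steps 8
    #   python local_sgd.py --cpu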
| 262 | 1 |
import math
def is_prime(number: int) -> bool:
    '''simple docstring'''
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)
def next_prime(value, factor=1, **kwargs):
    '''simple docstring'''
    value = factor * value
    first_value_val = value
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
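if __name__ == "__main__":
    # Illustrative usage (not in the original module): `next_prime` searches
    # upward (or downward with desc=True) and, when the start value is itself
    # prime, restarts one past it.
    print(is_prime(11))    # True
    print(next_prime(10))  # 11
    print(next_prime(11))  # 13, because 11 itself is skipped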
| 348 |
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a__ :
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any]=1_3 , UpperCAmelCase__ : List[str]=[3_0, 3_0] , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : Dict=3 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : int=3_2 , UpperCAmelCase__ : List[str]=5 , UpperCAmelCase__ : Dict=4 , UpperCAmelCase__ : int=3_7 , UpperCAmelCase__ : List[str]="gelu" , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : str=1_0 , UpperCAmelCase__ : Dict=0.02 , UpperCAmelCase__ : Any=3 , UpperCAmelCase__ : int=None , UpperCAmelCase__ : List[Any]=8 , UpperCAmelCase__ : Dict=1_0 , ) ->Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : str = image_size
SCREAMING_SNAKE_CASE : List[Any] = patch_size
SCREAMING_SNAKE_CASE : Any = num_channels
SCREAMING_SNAKE_CASE : str = is_training
SCREAMING_SNAKE_CASE : Dict = use_labels
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = intermediate_size
SCREAMING_SNAKE_CASE : int = hidden_act
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : str = num_labels
SCREAMING_SNAKE_CASE : Dict = scope
SCREAMING_SNAKE_CASE : Optional[Any] = n_targets
SCREAMING_SNAKE_CASE : Dict = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
SCREAMING_SNAKE_CASE : Tuple = (image_size[1] // patch_size) * (image_size[0] // patch_size)
SCREAMING_SNAKE_CASE : int = num_patches + 1 + self.num_detection_tokens
def _lowercase ( self : Tuple ) ->Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
SCREAMING_SNAKE_CASE : str = []
for i in range(self.batch_size ):
SCREAMING_SNAKE_CASE : List[Any] = {}
SCREAMING_SNAKE_CASE : Any = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = torch.rand(self.n_targets , 4 , device=UpperCAmelCase__ )
labels.append(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _lowercase ( self : Dict ) ->Optional[Any]:
"""simple docstring"""
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] ) ->Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = YolosModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(UpperCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def _lowercase ( self : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] ) ->int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = YolosForObjectDetection(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(pixel_values=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
SCREAMING_SNAKE_CASE : int = model(pixel_values=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def _lowercase ( self : Dict ) ->Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a__ ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple =(YolosModel, YolosForObjectDetection) if is_torch_available() else ()
UpperCAmelCase__ : Any =(
{"""feature-extraction""": YolosModel, """object-detection""": YolosForObjectDetection} if is_torch_available() else {}
)
UpperCAmelCase__ : Tuple =False
UpperCAmelCase__ : int =False
UpperCAmelCase__ : Tuple =False
UpperCAmelCase__ : Optional[Any] =False
def _lowercase ( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any=False ) ->int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
SCREAMING_SNAKE_CASE : List[str] = []
for i in range(self.model_tester.batch_size ):
SCREAMING_SNAKE_CASE : Tuple = {}
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones(
size=(self.model_tester.n_targets,) , device=UpperCAmelCase__ , dtype=torch.long )
SCREAMING_SNAKE_CASE : str = torch.ones(
self.model_tester.n_targets , 4 , device=UpperCAmelCase__ , dtype=torch.float )
labels.append(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : str = labels
return inputs_dict
def _lowercase ( self : Dict ) ->Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = YolosModelTester(self )
SCREAMING_SNAKE_CASE : Optional[int] = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=3_7 )
def _lowercase ( self : Union[str, Any] ) ->List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowercase ( self : List[Any] ) ->int:
"""simple docstring"""
pass
def _lowercase ( self : Optional[int] ) ->Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[str] = model_class(UpperCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase__ , nn.Linear ) )
def _lowercase ( self : List[Any] ) ->int:
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = model_class(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Tuple = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def _lowercase ( self : Union[str, Any] ) ->Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def _lowercase ( self : Optional[Any] ) ->Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Optional[Any] = True
# in YOLOS, the seq_len is different
SCREAMING_SNAKE_CASE : Any = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = True
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.attentions
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE : Tuple = True
SCREAMING_SNAKE_CASE : Tuple = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.attentions
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
SCREAMING_SNAKE_CASE : List[str] = len(UpperCAmelCase__ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE : Optional[int] = True
SCREAMING_SNAKE_CASE : Tuple = True
SCREAMING_SNAKE_CASE : List[str] = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE : Optional[int] = 1
self.assertEqual(out_len + added_hidden_states , len(UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE : str = outputs.attentions
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _lowercase ( self : Any ) ->str:
"""simple docstring"""
def check_hidden_states_output(UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str ):
SCREAMING_SNAKE_CASE : List[Any] = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE : str = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ )
# YOLOS has a different seq_length
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Any = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Any = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def _lowercase ( self : Any ) ->Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*UpperCAmelCase__ )
@slow
def _lowercase ( self : str ) ->List[Any]:
"""simple docstring"""
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : str = YolosModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
def __lowercase ( ) -> List[Any]:
SCREAMING_SNAKE_CASE : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : int ) ->Union[str, Any]:
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""hustvl/yolos-small""" ) if is_vision_available() else None
@slow
def _lowercase ( self : List[Any] ) ->Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = YolosForObjectDetection.from_pretrained("""hustvl/yolos-small""" ).to(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Tuple = self.default_image_processor
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE : str = image_processor(images=UpperCAmelCase__ , return_tensors="""pt""" ).to(UpperCAmelCase__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(inputs.pixel_values )
# verify outputs
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 1_0_0, 9_2) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Any = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] , device=UpperCAmelCase__ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] , device=UpperCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCAmelCase__ , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCAmelCase__ , atol=1e-4 ) )
# verify postprocessing
SCREAMING_SNAKE_CASE : int = image_processor.post_process_object_detection(
UpperCAmelCase__ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
SCREAMING_SNAKE_CASE : str = torch.tensor([0.99_94, 0.97_90, 0.99_64, 0.99_72, 0.98_61] ).to(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : str = [7_5, 7_5, 1_7, 6_3, 1_7]
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([3_35.06_09, 79.38_48, 3_75.42_16, 1_87.24_95] ).to(UpperCAmelCase__ )
self.assertEqual(len(results["""scores"""] ) , 5 )
self.assertTrue(torch.allclose(results["""scores"""] , UpperCAmelCase__ , atol=1e-4 ) )
self.assertSequenceEqual(results["""labels"""].tolist() , UpperCAmelCase__ )
self.assertTrue(torch.allclose(results["""boxes"""][0, :] , UpperCAmelCase__ ) )
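# To run only the integration test above (illustrative pytest invocation; the
# repository-relative path is an assumption):
#   python -m pytest tests/models/yolos/test_modeling_yolos.py -k "integration"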
| 245 | 0 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> str:
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
_UpperCAmelCase : List[str] = """
transformers can only be used from the command line to convert TensorFlow models to PyTorch. In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class lowerCAmelCase ( __UpperCamelCase ):
@staticmethod
def A_ ( UpperCAmelCase : ArgumentParser ) -> int:
lowerCamelCase__ : Optional[int] = parser.add_parser(
            'convert' , help='CLI tool to convert a model from original author checkpoints to Transformers PyTorch checkpoints.' , )
train_parser.add_argument('--model_type' , type=UpperCAmelCase , required=UpperCAmelCase , help='Model\'s type.' )
train_parser.add_argument(
'--tf_checkpoint' , type=UpperCAmelCase , required=UpperCAmelCase , help='TensorFlow checkpoint path or folder.' )
train_parser.add_argument(
'--pytorch_dump_output' , type=UpperCAmelCase , required=UpperCAmelCase , help='Path to the PyTorch saved model output.' )
train_parser.add_argument('--config' , type=UpperCAmelCase , default='' , help='Configuration file path or folder.' )
train_parser.add_argument(
'--finetuning_task_name' , type=UpperCAmelCase , default=UpperCAmelCase , help='Optional fine-tuning task name if the TF model was a finetuned model.' , )
train_parser.set_defaults(func=UpperCAmelCase )
def __init__( self : List[str] , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : str , *UpperCAmelCase : List[str] , ) -> Optional[Any]:
lowerCamelCase__ : List[Any] = logging.get_logger('transformers-cli/converting' )
self._logger.info(F"""Loading model {model_type}""" )
lowerCamelCase__ : Any = model_type
lowerCamelCase__ : List[Any] = tf_checkpoint
lowerCamelCase__ : Tuple = pytorch_dump_output
lowerCamelCase__ : List[str] = config
lowerCamelCase__ : Any = finetuning_task_name
def A_ ( self : int ) -> Optional[int]:
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCAmelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCAmelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCAmelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(UpperCAmelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCAmelCase )
if "ckpt" in self._tf_checkpoint.lower():
lowerCamelCase__ : Optional[Any] = self._tf_checkpoint
lowerCamelCase__ : int = ''
else:
lowerCamelCase__ : Optional[int] = self._tf_checkpoint
lowerCamelCase__ : Tuple = ''
convert_transfo_xl_checkpoint_to_pytorch(
UpperCAmelCase , self._config , self._pytorch_dump_output , UpperCAmelCase )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCAmelCase )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCAmelCase )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
                '--model_type should be selected in the list [albert, bert, funnel, t5, gpt, transfo_xl, gpt2, xlnet, xlm, lxmert, rembert]' )
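# Example invocation (illustrative checkpoint/config paths):
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt \
#       --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model.bin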
| 45 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase : List[Any] = {
"""configuration_trajectory_transformer""": [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TrajectoryTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrajectoryTransformerModel""",
"""TrajectoryTransformerPreTrainedModel""",
"""load_tf_weights_in_trajectory_transformer""",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
_UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
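# Note: `_LazyModule` defers the imports declared in `_import_structure` until
# first attribute access, keeping `import transformers` cheap. Sketch (assumes
# the `models/deprecated` layout implied by the four-dot relative import above):
#   from transformers.models.deprecated.trajectory_transformer import (
#       TrajectoryTransformerConfig,
#   )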
| 45 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_UpperCAmelCase : Optional[int] = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
_UpperCAmelCase : int = {
"squeezebert/squeezebert-uncased": 512,
"squeezebert/squeezebert-mnli": 512,
"squeezebert/squeezebert-mnli-headless": 512,
}
_UpperCAmelCase : Dict = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : List[str] = VOCAB_FILES_NAMES
__lowercase : str = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
__lowercase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Tuple = SqueezeBertTokenizer
def __init__( self , A_=None , A_=None , A_=True , A_="[UNK]" , A_="[SEP]" , A_="[PAD]" , A_="[CLS]" , A_="[MASK]" , A_=True , A_=None , **A_ , ) -> Optional[int]:
"""simple docstring"""
super().__init__(
A_ , tokenizer_file=A_ , do_lower_case=A_ , unk_token=A_ , sep_token=A_ , pad_token=A_ , cls_token=A_ , mask_token=A_ , tokenize_chinese_chars=A_ , strip_accents=A_ , **A_ , )
UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , A_ ) != do_lower_case
or normalizer_state.get('strip_accents' , A_ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , A_ ) != tokenize_chinese_chars
):
UpperCamelCase = getattr(A_ , normalizer_state.pop('type' ) )
UpperCamelCase = do_lower_case
UpperCamelCase = strip_accents
UpperCamelCase = tokenize_chinese_chars
UpperCamelCase = normalizer_class(**A_ )
UpperCamelCase = do_lower_case
def __UpperCamelCase ( self , A_ , A_=None ) -> List[str]:
"""simple docstring"""
UpperCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
UpperCamelCase = self._tokenizer.model.save(A_ , name=A_ )
return tuple(A_ )
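# Illustrative usage (a sketch; the class above is published as
# `SqueezeBertTokenizerFast`, and the hub checkpoint must be reachable):
#   tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#   tok("Hello world")["input_ids"]           # [CLS] + wordpiece ids + [SEP]
#   tok.save_pretrained("./squeezebert-tok")  # writes the vocab and tokenizer files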
| 222 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_UpperCAmelCase : List[str] = {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : List[str] = "gpt_neox"
def __init__( self , A_=50_432 , A_=6_144 , A_=44 , A_=64 , A_=24_576 , A_="gelu" , A_=0.25 , A_=10_000 , A_=0.0 , A_=0.0 , A_=0.1 , A_=2_048 , A_=0.02 , A_=1e-5 , A_=True , A_=0 , A_=2 , A_=False , A_=True , A_=None , **A_ , ) -> Tuple:
"""simple docstring"""
super().__init__(bos_token_id=A_ , eos_token_id=A_ , **A_ )
UpperCamelCase = vocab_size
UpperCamelCase = max_position_embeddings
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = rotary_pct
UpperCamelCase = rotary_emb_base
UpperCamelCase = attention_dropout
UpperCamelCase = hidden_dropout
UpperCamelCase = classifier_dropout
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = use_cache
UpperCamelCase = tie_word_embeddings
UpperCamelCase = use_parallel_residual
UpperCamelCase = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
                'The hidden size is not divisible by the number of attention heads! Make sure to update them!' )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , A_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
F'''got {self.rope_scaling}''' )
UpperCamelCase = self.rope_scaling.get('type' , A_ )
UpperCamelCase = self.rope_scaling.get('factor' , A_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                F'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(A_ , A_ ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
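# Sketch of the `rope_scaling` contract validated above (illustrative values;
# the class is published as `GPTNeoXConfig`):
#   GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # accepted
#   GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 0.5})  # ValueError: factor must be > 1
#   GPTNeoXConfig(rope_scaling={"scale": 2.0})                     # ValueError: wrong fields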
| 222 | 1 |
"""simple docstring"""
a :Optional[Any] = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
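# Note on the pattern above: each `is_*_available()` probe raises
# `OptionalDependencyNotAvailable` when a backend is missing, and the matching
# `dummy_*_objects` import installs placeholder classes that raise a
# descriptive ImportError only when they are actually used.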
| 56 |
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
a :Optional[int] = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config we are going to pre-train."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments) -> None:
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    """Data collator that dynamically pads the received inputs and samples the masked time indices."""

    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )
        return batch
class Wav2Vec2PreTrainer(Trainer):
    """Trainer subclass that additionally decays the gumbel softmax temperature after every update step."""

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """Perform a training step on a batch of inputs."""
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
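# Illustrative sketch (not part of the original script): the gumbel-softmax
# temperature applied in training_step above is a plain exponential decay
# clipped at a floor; the defaults mirror ModelArguments (max 2.0, min 0.5,
# decay 0.999995 per update step).
def _gumbel_temperature_schedule(step, max_temp=2.0, min_temp=0.5, decay=0.999995):
    return max(max_temp * decay**step, min_temp)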
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = Wav2Vec2ForPreTraining(config)
    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)
    trainer = Wav2Vec2PreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()
if __name__ == "__main__":
main()
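# Illustrative invocation (not part of the original script; the script filename
# and checkpoint identifier are assumptions, the flags come from the dataclasses
# above plus the standard TrainingArguments):
#
#   python run_pretrain.py \
#       --model_name_or_path facebook/wav2vec2-large-lv60 \
#       --dataset_name librispeech_asr --dataset_config_name clean \
#       --output_dir ./wav2vec2-pretrained --max_duration_in_seconds 20.0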
| 56 | 1 |
import numpy as np


def runge_kutta(f, ya, xa, x_end, h):
    """Classic fourth-order Runge-Kutta integration of y' = f(x, y).

    f: right-hand side f(x, y)
    ya: initial value y(xa)
    xa: initial x
    x_end: final x
    h: step size
    Returns the array of y values, one per step.
    """
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
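# Illustrative usage (not part of the original file): integrate y' = y from
# x=0 with y(0)=1 up to x=1; the final value approximates e = 2.71828...
#
#   ys = runge_kutta(lambda x, y: y, ya=1.0, xa=0.0, x_end=1.0, h=0.01)
#   print(ys[-1])  # ~2.71828, matching np.exp(1) to several decimal places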
| 175 |
import argparse

import torch

from transformers import GPT2LMHeadModel, RobertaForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head #
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
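    # Illustrative follow-up (not part of the original script): the extracted
    # weights can be loaded into a 6-layer student whose parameter names match;
    # the student config below is a hypothetical placeholder.
    #
    #   student = RobertaForMaskedLM(student_config)  # 6-layer config, hypothetical
    #   missing, unexpected = student.load_state_dict(compressed_sd, strict=False)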
| 175 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
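# Illustrative sketch (not part of the original file): a minimal PEP 562
# equivalent of the _LazyModule indirection above. Attribute access triggers
# the real import, so importing the package stays cheap.
#
#   import importlib
#
#   def __getattr__(name):
#       if name == "BartphoTokenizer":
#           module = importlib.import_module(".tokenization_bartpho", __name__)
#           return getattr(module, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")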
| 363 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
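# Illustrative invocation (not part of the original script; the filename is an
# assumption, the flags are the ones defined above):
#
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-base --push_to_hub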
| 257 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any]=2 , SCREAMING_SNAKE_CASE_ : List[Any]=3_2 , SCREAMING_SNAKE_CASE_ : Any=1_6 , SCREAMING_SNAKE_CASE_ : Optional[Any]=3 , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : Dict=3_2 , SCREAMING_SNAKE_CASE_ : str=4 , SCREAMING_SNAKE_CASE_ : List[str]=[0, 1, 2, 3] , SCREAMING_SNAKE_CASE_ : int=4 , SCREAMING_SNAKE_CASE_ : int=3_7 , SCREAMING_SNAKE_CASE_ : int="gelu" , SCREAMING_SNAKE_CASE_ : str=0.1 , SCREAMING_SNAKE_CASE_ : str=0.1 , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.02 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE_ : Any=[1, 3_8_4, 2_4, 2_4] , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , ):
lowerCAmelCase_ : str = parent
lowerCAmelCase_ : List[str] = batch_size
lowerCAmelCase_ : int = image_size
lowerCAmelCase_ : Any = patch_size
lowerCAmelCase_ : List[str] = num_channels
lowerCAmelCase_ : Union[str, Any] = is_training
lowerCAmelCase_ : Union[str, Any] = use_labels
lowerCAmelCase_ : Optional[int] = hidden_size
lowerCAmelCase_ : List[str] = num_hidden_layers
lowerCAmelCase_ : Tuple = backbone_out_indices
lowerCAmelCase_ : Dict = num_attention_heads
lowerCAmelCase_ : int = intermediate_size
lowerCAmelCase_ : Optional[int] = hidden_act
lowerCAmelCase_ : Dict = hidden_dropout_prob
lowerCAmelCase_ : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase_ : List[Any] = initializer_range
lowerCAmelCase_ : Tuple = num_labels
lowerCAmelCase_ : Optional[int] = backbone_featmap_shape
lowerCAmelCase_ : Tuple = scope
lowerCAmelCase_ : int = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase_ : List[Any] = (image_size // patch_size) ** 2
lowerCAmelCase_ : Union[str, Any] = num_patches + 1
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
lowerCAmelCase_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ : Optional[Any] = None
if self.use_labels:
lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCAmelCase_ : List[Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
lowerCAmelCase_ : List[Any] = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [9_6, 1_9_2, 3_8_4, 7_6_8],
'num_groups': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=SCREAMING_SNAKE_CASE_ , backbone_featmap_shape=self.backbone_featmap_shape , )
def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str ):
lowerCAmelCase_ : Optional[int] = DPTModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : Any = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
lowerCAmelCase_ : Dict = self.num_labels
lowerCAmelCase_ : Dict = DPTForDepthEstimation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : Tuple = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE__ ( self : str , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str ):
lowerCAmelCase_ : Union[str, Any] = self.num_labels
lowerCAmelCase_ : str = DPTForSemanticSegmentation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : List[Any] = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowerCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ : int = config_and_inputs
lowerCAmelCase_ : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
_SCREAMING_SNAKE_CASE = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE = (
{
"""depth-estimation""": DPTForDepthEstimation,
"""feature-extraction""": DPTModel,
"""image-segmentation""": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowerCAmelCase_ : List[str] = DPTModelTester(self )
lowerCAmelCase_ : int = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='DPT does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
pass
def SCREAMING_SNAKE_CASE__ ( self : int ):
lowerCAmelCase_ ,lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Any = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase_ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self : int ):
lowerCAmelCase_ ,lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : str = model_class(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : str = [*signature.parameters.keys()]
lowerCAmelCase_ : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : str ):
lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : int ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
lowerCAmelCase_ ,lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : Tuple = True
if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
continue
lowerCAmelCase_ : int = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
lowerCAmelCase_ : Dict = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : str = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def SCREAMING_SNAKE_CASE__ ( self : int ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
lowerCAmelCase_ ,lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : int = False
lowerCAmelCase_ : Union[str, Any] = True
if model_class in get_values(SCREAMING_SNAKE_CASE_ ) or not model_class.supports_gradient_checkpointing:
continue
lowerCAmelCase_ : Dict = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.gradient_checkpointing_enable()
model.train()
lowerCAmelCase_ : List[str] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
lowerCAmelCase_ ,lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : List[Any] = _config_zero_init(SCREAMING_SNAKE_CASE_ )
for model_class in self.all_model_classes:
lowerCAmelCase_ : Optional[int] = model_class(config=SCREAMING_SNAKE_CASE_ )
# Skip the check for the backbone
lowerCAmelCase_ : Union[str, Any] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
lowerCAmelCase_ : List[Any] = [F"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
pass
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
lowerCAmelCase_ : Tuple = DPTModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
lowerCAmelCase_ ,lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : Dict = 'add'
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase_ : Optional[int] = DPTForDepthEstimation(SCREAMING_SNAKE_CASE_ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
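# Illustrative sketch (not part of the original tests): converting the
# predicted_depth tensor checked above (shape (1, 384, 384)) into an 8-bit
# image for visualization.
#
#   depth = outputs.predicted_depth.squeeze(0)
#   depth = (depth - depth.min()) / (depth.max() - depth.min() + 1e-8)
#   depth_img = Image.fromarray((depth * 255).to(torch.uint8).cpu().numpy())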
| 224 |
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' )
parser.add_argument(
'--dataset_name' , type=lowerCAmelCase__ , default='wikitext' , help='Name of the training. Explore datasets at: hf.co/datasets.' , )
parser.add_argument(
'--dataset_config' , type=lowerCAmelCase__ , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' )
parser.add_argument(
'--tokenizer_name_or_path' , type=lowerCAmelCase__ , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , )
parser.add_argument(
'--shard_size' , type=lowerCAmelCase__ , default=1000 , help='Number of entries to go in a single shard.' , )
parser.add_argument('--split' , type=lowerCAmelCase__ , default='train' , choices=['train', 'test', 'validation'] )
parser.add_argument(
'--limit' , default=lowerCAmelCase__ , type=lowerCAmelCase__ , help='Limit the number of shards (used for debugging).' , )
parser.add_argument(
'--max_length' , type=lowerCAmelCase__ , default=512 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
' sequence length that is a multiple of 8.' , )
parser.add_argument(
'--output_dir' , default='tf-tpu' , type=lowerCAmelCase__ , help='Output directory where the TFRecord shards will be saved. If the'
' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
' shards will be directly saved to a Google Cloud Storage bucket.' , )
    args = parser.parse_args()
return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
    args = parse_args()
main(args)
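# Illustrative sketch (not part of the original script): reading the shards back
# with tf.data. The fixed length 512 matches the script's --max_length default;
# the glob pattern assumes the default --output_dir and --split values.
#
#   feature_spec = {
#       "input_ids": tf.io.FixedLenFeature([512], tf.int64),
#       "attention_mask": tf.io.FixedLenFeature([512], tf.int64),
#   }
#   ds = tf.data.TFRecordDataset(tf.io.gfile.glob("tf-tpu/train/*.tfrecord"))
#   ds = ds.map(lambda rec: tf.io.parse_single_example(rec, feature_spec))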
| 224 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}


class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if isinstance(pos_att_type, str):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size=-1,
        seq_length=-1,
        num_choices=-1,
        is_pair=False,
        framework=None,
        num_channels=3,
        image_width=40,
        image_height=40,
        tokenizer=None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
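# Illustrative usage (not part of the original module): the defaults above
# reproduce the deberta-v2-xlarge architecture, and the ONNX input spec drops
# token_type_ids because type_vocab_size defaults to 0.
#
#   config = DebertaV2Config()
#   onnx_config = DebertaV2OnnxConfig(config)
#   print(list(onnx_config.inputs))  # ['input_ids', 'attention_mask']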
| 41 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
def __init__( self , _a , _a=13 , _a=30 , _a=2 , _a=3 , _a=True , _a=True , _a=32 , _a=2 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=10 , _a=0.02 , _a=3 , _a=0.6 , _a=None , ):
__magic_name__ : Tuple = parent
__magic_name__ : Tuple = batch_size
__magic_name__ : int = image_size
__magic_name__ : Optional[Any] = patch_size
__magic_name__ : int = num_channels
__magic_name__ : Dict = is_training
__magic_name__ : Tuple = use_labels
__magic_name__ : List[str] = hidden_size
__magic_name__ : Dict = num_hidden_layers
__magic_name__ : Optional[Any] = num_attention_heads
__magic_name__ : int = intermediate_size
__magic_name__ : int = hidden_act
__magic_name__ : Optional[Any] = hidden_dropout_prob
__magic_name__ : List[Any] = attention_probs_dropout_prob
__magic_name__ : Optional[Any] = type_sequence_label_size
__magic_name__ : Optional[Any] = initializer_range
__magic_name__ : List[str] = mask_ratio
__magic_name__ : Union[str, Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
__magic_name__ : Union[str, Any] = (image_size // patch_size) ** 2
__magic_name__ : Optional[int] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ : Tuple = None
if self.use_labels:
__magic_name__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a ):
__magic_name__ : List[str] = TFViTMAEModel(config=_a )
__magic_name__ : List[Any] = model(_a , training=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a ):
__magic_name__ : Dict = TFViTMAEForPreTraining(_a )
__magic_name__ : Any = model(_a , training=_a )
# expected sequence length = num_patches
__magic_name__ : Tuple = (self.image_size // self.patch_size) ** 2
__magic_name__ : Dict = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
__magic_name__ : Dict = 1
__magic_name__ : int = TFViTMAEForPreTraining(_a )
__magic_name__ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ : Any = model(_a , training=_a )
__magic_name__ : Any = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[int] = self.prepare_config_and_inputs()
((__magic_name__) , (__magic_name__) , (__magic_name__)) : List[Any] = config_and_inputs
__magic_name__ : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
UpperCamelCase__ = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
UpperCamelCase__ = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[int] = TFViTMAEModelTester(self )
__magic_name__ : List[str] = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE ( self ):
pass
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ , __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Dict = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__magic_name__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , tf.keras.layers.Layer ) )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ , __magic_name__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Any = model_class(_a )
__magic_name__ : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : int = [*signature.parameters.keys()]
__magic_name__ : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_a )
def SCREAMING_SNAKE_CASE ( self ):
# make the mask reproducible
np.random.seed(2 )
__magic_name__ , __magic_name__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : List[str] = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__magic_name__ : str = model_class(_a )
__magic_name__ : List[Any] = self._prepare_for_class(_a , _a )
__magic_name__ : Union[str, Any] = model(_a , noise=_a )
__magic_name__ : int = copy.deepcopy(self._prepare_for_class(_a , _a ) )
__magic_name__ : str = model(**_a , noise=_a )
__magic_name__ : Optional[int] = outputs_dict[0].numpy()
__magic_name__ : Optional[int] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def SCREAMING_SNAKE_CASE ( self ):
# make the mask reproducible
np.random.seed(2 )
__magic_name__ , __magic_name__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : List[Any] = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(_a ):
__magic_name__ : Union[str, Any] = {}
for k, v in inputs_dict.items():
if tf.is_tensor(_a ):
__magic_name__ : List[str] = v.numpy()
else:
__magic_name__ : str = np.array(_a )
return inputs_np_dict
for model_class in self.all_model_classes:
__magic_name__ : Optional[Any] = model_class(_a )
__magic_name__ : int = self._prepare_for_class(_a , _a )
__magic_name__ : Optional[Any] = prepare_numpy_arrays(_a )
__magic_name__ : Union[str, Any] = model(_a , noise=_a )
__magic_name__ : int = model(**_a , noise=_a )
self.assert_outputs_same(_a , _a )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a ):
# make masks reproducible
np.random.seed(2 )
__magic_name__ : Union[str, Any] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
__magic_name__ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__magic_name__ : Dict = tf.constant(_a )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
__magic_name__ : List[str] = tf_noise
super().check_pt_tf_models(_a , _a , _a )
def SCREAMING_SNAKE_CASE ( self ):
# make mask reproducible
np.random.seed(2 )
__magic_name__ , __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Any = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(_a )
if module_member_name.endswith("MainLayer" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("MainLayer" )] == model_class.__name__[: -len("Model" )]
for module_member in (getattr(_a , _a ),)
if isinstance(_a , _a )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(_a , "_keras_serializable" , _a )
}
__magic_name__ : Optional[int] = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__magic_name__ : Optional[int] = tf.convert_to_tensor(_a )
inputs_dict.update({"noise": noise} )
for main_layer_class in tf_main_layer_classes:
__magic_name__ : Optional[int] = main_layer_class(_a )
__magic_name__ : Optional[int] = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
__magic_name__ : Any = tf.keras.Model(_a , outputs=main_layer(_a ) )
__magic_name__ : str = model(_a )
with tempfile.TemporaryDirectory() as tmpdirname:
__magic_name__ : Dict = os.path.join(_a , "keras_model.h5" )
model.save(_a )
__magic_name__ : Optional[int] = tf.keras.models.load_model(
_a , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(_a , tf.keras.Model )
__magic_name__ : Optional[Any] = model(_a )
self.assert_outputs_same(_a , _a )
@slow
def SCREAMING_SNAKE_CASE ( self ):
# make mask reproducible
np.random.seed(2 )
__magic_name__ , __magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : List[Any] = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__magic_name__ : int = model_class(_a )
__magic_name__ : Tuple = self._prepare_for_class(_a , _a )
__magic_name__ : List[Any] = model(_a , noise=_a )
if model_class.__name__ == "TFViTMAEModel":
__magic_name__ : Optional[int] = outputs.last_hidden_state.numpy()
__magic_name__ : int = 0
else:
__magic_name__ : List[Any] = outputs.logits.numpy()
__magic_name__ : int = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_a , saved_model=_a )
__magic_name__ : List[str] = model_class.from_pretrained(_a )
__magic_name__ : Optional[Any] = model(_a , noise=_a )
if model_class.__name__ == "TFViTMAEModel":
__magic_name__ : int = after_outputs["last_hidden_state"].numpy()
__magic_name__ : str = 0
else:
__magic_name__ : Any = after_outputs["logits"].numpy()
__magic_name__ : List[str] = 0
__magic_name__ : Any = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_a , 1e-5 )
def SCREAMING_SNAKE_CASE ( self ):
# make mask reproducible
np.random.seed(2 )
__magic_name__ , __magic_name__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : str = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__magic_name__ : List[Any] = model_class(_a )
__magic_name__ : List[str] = self._prepare_for_class(_a , _a )
__magic_name__ : Any = model(_a , noise=_a )
__magic_name__ : Optional[Any] = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(_a )
__magic_name__ : Optional[int] = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
__magic_name__ : Optional[Any] = model_class.from_config(model.config )
__magic_name__ : Tuple = new_model(_a ) # Build model
new_model.set_weights(model.get_weights() )
__magic_name__ : List[str] = new_model(_a , noise=_a )
self.assert_outputs_same(_a , _a )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def SCREAMING_SNAKE_CASE ( self ):
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def SCREAMING_SNAKE_CASE ( self ):
pass
@slow
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Dict = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(_a )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
| 41 | 1 |
'''simple docstring'''
import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
def convert_t5x_checkpoint_to_flax( t5x_checkpoint_path , config_name , flax_dump_folder_path ):
config = AutoConfig.from_pretrained(config_name )
flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config )
tax_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path )
split_mlp_wi = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']
if config.model_type == "t5":
snake_case__ : str = '''SelfAttention'''
if config.model_type == "longt5" and config.encoder_attention_type == "local":
snake_case__ : List[Any] = '''LocalSelfAttention'''
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
snake_case__ : Optional[Any] = '''TransientGlobalSelfAttention'''
else:
raise ValueError(
"""Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with `encoder_attention_type`"""
""" attribute with a value from ['local', 'transient-global'].""" )
# Encoder
for layer_index in range(config.num_layers ):
snake_case__ : Dict = f"layers_{str(_lowerCAmelCase )}"
# Self-Attention
tax_attention_key = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
tax_attention_out = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
tax_attention_query = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
tax_attention_value = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
tax_global_layer_norm = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
tax_attention_layer_norm = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
tax_mlp_wi_0 = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
tax_mlp_wi_1 = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
tax_mlp_wi = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
tax_mlp_wo = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
tax_mlp_layer_norm = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
flax_model_encoder_layer_block = flax_model.params['''encoder''']['''block'''][str(layer_index )]['''layer''']
snake_case__ : Union[str, Any] = tax_attention_key
snake_case__ : int = tax_attention_out
snake_case__ : Union[str, Any] = tax_attention_query
snake_case__ : Any = tax_attention_value
snake_case__ : str = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
snake_case__ : List[Any] = tax_global_layer_norm
if split_mlp_wi:
snake_case__ : str = tax_mlp_wi_0
snake_case__ : int = tax_mlp_wi_1
else:
snake_case__ : List[str] = tax_mlp_wi
snake_case__ : Any = tax_mlp_wo
snake_case__ : int = tax_mlp_layer_norm
snake_case__ : List[Any] = flax_model_encoder_layer_block
# Only for layer 0:
tax_encoder_rel_embedding = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
snake_case__ : Any = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
tax_encoder_global_rel_embedding = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
snake_case__ : int = tax_encoder_global_rel_embedding
# Assigning
tax_encoder_norm = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
snake_case__ : List[Any] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
snake_case__ : str = f"layers_{str(_lowerCAmelCase )}"
# Self-Attention
tax_attention_key = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
tax_attention_out = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
tax_attention_query = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
tax_attention_value = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
tax_pre_attention_layer_norm = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm''']['''scale''']
# Encoder-Decoder-Attention
tax_enc_dec_attention_module = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
tax_enc_dec_attention_key = tax_enc_dec_attention_module['''key''']['''kernel''']
tax_enc_dec_attention_out = tax_enc_dec_attention_module['''out''']['''kernel''']
tax_enc_dec_attention_query = tax_enc_dec_attention_module['''query''']['''kernel''']
tax_enc_dec_attention_value = tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
tax_cross_layer_norm = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
tax_mlp_wi_0 = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
tax_mlp_wi_1 = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
tax_mlp_wi = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
tax_mlp_wo = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
tax_mlp_layer_norm = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
flax_model_decoder_layer_block = flax_model.params['''decoder''']['''block'''][str(layer_index )]['''layer''']
snake_case__ : Optional[int] = tax_attention_key
snake_case__ : Optional[int] = tax_attention_out
snake_case__ : Union[str, Any] = tax_attention_query
snake_case__ : List[str] = tax_attention_value
snake_case__ : List[str] = tax_pre_attention_layer_norm
snake_case__ : List[str] = tax_enc_dec_attention_key
snake_case__ : int = tax_enc_dec_attention_out
snake_case__ : Dict = tax_enc_dec_attention_query
snake_case__ : List[str] = tax_enc_dec_attention_value
snake_case__ : str = tax_cross_layer_norm
if split_mlp_wi:
snake_case__ : Tuple = tax_mlp_wi_0
snake_case__ : int = tax_mlp_wi_1
else:
snake_case__ : List[str] = tax_mlp_wi
snake_case__ : Union[str, Any] = tax_mlp_wo
snake_case__ : str = tax_mlp_layer_norm
snake_case__ : str = flax_model_decoder_layer_block
# Decoder Normalization
tax_decoder_norm = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
snake_case__ : Any = tax_decoder_norm
# Only for layer 0:
tax_decoder_rel_embedding = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
snake_case__ : Optional[int] = tax_decoder_rel_embedding
# Token Embeddings
tax_token_embeddings = tax_model['''target''']['''token_embedder''']['''embedding''']
snake_case__ : str = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
snake_case__ : Optional[Any] = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
flax_model.save_pretrained(_lowerCAmelCase )
print("""T5X Model was sucessfully converted!""" )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
)
parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
parser.add_argument(
"--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
)
__a = parser.parse_args()
convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
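A minimal invocation sketch for the converter above. The checkpoint directory and dump folder are hypothetical placeholders, and google/long-t5-local-base is just one config name that satisfies the model_type check; none of these values come from this file:

    # Hypothetical call, assuming convert_t5x_checkpoint_to_flax (defined above) is in scope.
    convert_t5x_checkpoint_to_flax(
        "/tmp/t5x_checkpoint",        # t5x_checkpoint_path (assumed to exist on disk)
        "google/long-t5-local-base",  # config_name for a LongT5 model with local attention
        "/tmp/flax_dump",             # flax_dump_folder_path where the Flax weights are written
    )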
| 35 |
'''simple docstring'''
from __future__ import annotations
def find_max( nums : list[int | float] , left : int , right : int ) -> int | float:
if len(nums ) == 0:
raise ValueError('''find_max() arg is an empty sequence''' )
if (
left >= len(nums )
or left < -len(nums )
or right >= len(nums )
or right < -len(nums )
):
raise IndexError('''list index out of range''' )
if left == right:
return nums[left]
mid = (left + right) >> 1 # the middle
left_max = find_max(nums , left , mid ) # find max in range[left, mid]
right_max = find_max(nums , mid + 1 , right ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
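A quick illustrative call of the divide-and-conquer find_max above; the sample list is invented for this example:

    nums = [3, -7, 10, 2, 10, -4]
    # the recursion splits [0, 5] into [0, 2] and [3, 5] and keeps the larger maximum
    assert find_max(nums, 0, len(nums) - 1) == 10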
| 23 | 0 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser( subparsers=None ):
'''simple docstring'''
parent_parser = argparse.ArgumentParser(add_help=False , allow_abbrev=False )
# The main config parser
config_parser = config_command_parser(subparsers )
# The subparser to add commands to
subcommands = config_parser.add_subparsers(title="subcommands" , dest="subcommand" )
# Then add other parsers with the parent parser
default_command_parser(subcommands , parents=[parent_parser] )
update_command_parser(subcommands , parents=[parent_parser] )
return config_parser
def main( ):
'''simple docstring'''
config_parser = get_config_parser()
args = config_parser.parse_args()
if not hasattr(args , "func" ):
config_parser.print_help()
exit(1 )
# Run
args.func(args )
if __name__ == "__main__":
main()
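A self-contained sketch of the parent-parser pattern used above; the tool name and the --verbose flag are invented for illustration and are not part of the accelerate CLI:

    import argparse

    # Shared flags live on a help-less parent parser; each subcommand inherits them.
    parent = argparse.ArgumentParser(add_help=False)
    parent.add_argument("--verbose", action="store_true")

    parser = argparse.ArgumentParser(prog="tool")
    subparsers = parser.add_subparsers(dest="subcommand")
    subparsers.add_parser("run", parents=[parent])
    subparsers.add_parser("check", parents=[parent])

    args = parser.parse_args(["run", "--verbose"])
    print(args.subcommand, args.verbose)  # run True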
| 202 |
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester :
def __init__( self , parent , batch_size=2 , image_size=3_2 , patch_size=1_6 , num_channels=3 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=4 , backbone_out_indices=[0, 1, 2, 3] , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , num_labels=3 , backbone_featmap_shape=[1, 3_8_4, 2_4, 2_4] , is_hybrid=True , scope=None , ):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.backbone_out_indices = backbone_out_indices
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.num_labels = num_labels
self.backbone_featmap_shape = backbone_featmap_shape
self.scope = scope
self.is_hybrid = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
num_patches = (image_size // patch_size) ** 2
self.seq_length = num_patches + 1
def prepare_config_and_inputs( self ):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
config = self.get_config()
return config, pixel_values, labels
def get_config( self ):
backbone_config = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [9_6, 1_9_2, 3_8_4, 7_6_8],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=backbone_config , backbone_featmap_shape=self.backbone_featmap_shape , )
def create_and_check_model( self , config , pixel_values , labels ):
model = DPTModel(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_for_depth_estimation( self , config , pixel_values , labels ):
config.num_labels = self.num_labels
model = DPTForDepthEstimation(config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels ):
config.num_labels = self.num_labels
model = DPTForSemanticSegmentation(config )
model.to(torch_device )
model.eval()
result = model(pixel_values , labels=labels )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class DPTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
pipeline_model_mapping = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
def setUp( self ):
self.model_tester = DPTModelTester(self )
self.config_tester = ConfigTester(self , config_class=DPTConfig , has_text_modality=False , hidden_size=3_7 )
def snake_case_ ( self : int ):
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def snake_case_ ( self : Union[str, Any] ):
pass
def snake_case_ ( self : Tuple ):
_UpperCAmelCase , _UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : str = model_class(A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A , nn.Linear ) )
def snake_case_ ( self : Optional[Any] ):
_UpperCAmelCase , _UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : int = model_class(A )
_UpperCAmelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : str = [*signature.parameters.keys()]
_UpperCAmelCase : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , A )
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def snake_case_ ( self : Dict ):
_UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*A )
def snake_case_ ( self : Dict ):
_UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A )
def snake_case_ ( self : Optional[int] ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_UpperCAmelCase , _UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : Optional[int] = True
if model_class in get_values(A ):
continue
_UpperCAmelCase : int = model_class(A )
model.to(A )
model.train()
_UpperCAmelCase : Optional[Any] = self._prepare_for_class(A , A , return_labels=A )
_UpperCAmelCase : Optional[Any] = model(**A ).loss
loss.backward()
def snake_case_ ( self : Dict ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_UpperCAmelCase , _UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : Tuple = False
_UpperCAmelCase : Tuple = True
if model_class in get_values(A ) or not model_class.supports_gradient_checkpointing:
continue
_UpperCAmelCase : int = model_class(A )
model.to(A )
model.gradient_checkpointing_enable()
model.train()
_UpperCAmelCase : str = self._prepare_for_class(A , A , return_labels=A )
_UpperCAmelCase : List[str] = model(**A ).loss
loss.backward()
def snake_case_ ( self : Union[str, Any] ):
_UpperCAmelCase , _UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : int = _config_zero_init(A )
for model_class in self.all_model_classes:
_UpperCAmelCase : List[Any] = model_class(config=A )
# Skip the check for the backbone
_UpperCAmelCase : Dict = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
_UpperCAmelCase : List[str] = [f'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def snake_case_ ( self : int ):
pass
@slow
def snake_case_ ( self : Dict ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
_UpperCAmelCase : Any = DPTModel.from_pretrained(A )
self.assertIsNotNone(A )
def snake_case_ ( self : Tuple ):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
_UpperCAmelCase , _UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : Optional[Any] = "add"
with self.assertRaises(A ):
_UpperCAmelCase : List[Any] = DPTForDepthEstimation(A )
def prepare_img( ):
'''simple docstring'''
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
def test_inference_depth_estimation( self ):
image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(torch_device )
image = prepare_img()
inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
predicted_depth = outputs.predicted_depth
# verify the predicted depth
expected_shape = torch.Size((1, 3_8_4, 3_8_4) )
self.assertEqual(predicted_depth.shape , expected_shape )
expected_slice = torch.tensor(
[[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_0_0 , expected_slice , atol=1e-4 ) )
| 202 | 1 |
'''simple docstring'''
UpperCamelCase__ = {
'''km/h''': 1.0,
'''m/s''': 3.6,
'''mph''': 1.609_344,
'''knot''': 1.852,
}
UpperCamelCase__ = {
'''km/h''': 1.0,
'''m/s''': 0.277_777_778,
'''mph''': 0.621_371_192,
'''knot''': 0.539_956_803,
}
def convert_speed( speed : float , unit_from : str , unit_to : str ) -> float:
if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
msg = (
F"""Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"""
F"""Valid values are: {", ".join(speed_chart_inverse )}"""
)
raise ValueError(msg )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
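Two quick sanity checks for convert_speed above; the expected values follow directly from the two charts (100 km/h x 0.277777778 rounds to 27.778 m/s, and 100 km/h x 0.621371192 rounds to 62.137 mph):

    print(convert_speed(100, "km/h", "m/s"))  # 27.778
    print(convert_speed(100, "km/h", "mph"))  # 62.137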
| 181 |
import colorsys
from PIL import Image # type: ignore
def get_distance( x : float , y : float , max_step : int ) -> float:
a = x
b = y
for step in range(max_step ): # noqa: B007
a_new = a * a - b * b + x
b = 2 * a * b + y
a = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def get_black_and_white_rgb( distance : float ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return (2_5_5, 2_5_5, 2_5_5)
def get_color_coded_rgb( distance : float ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 2_5_5 ) for i in colorsys.hsv_to_rgb(distance , 1 , 1 ) )
def get_image( image_width : int = 8_0_0 , image_height : int = 6_0_0 , figure_center_x : float = -0.6 , figure_center_y : float = 0 , figure_width : float = 3.2 , max_step : int = 5_0 , use_distance_color_coding : bool = True , ):
img = Image.new('''RGB''' , (image_width, image_height) )
pixels = img.load()
# loop through the image-coordinates
for image_x in range(image_width ):
for image_y in range(image_height ):
# determine the figure-coordinates based on the image-coordinates
figure_height = figure_width / image_width * image_height
figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
distance = get_distance(figure_x , figure_y , max_step )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
pixels[image_x, image_y] = get_color_coded_rgb(distance )
else:
pixels[image_x, image_y] = get_black_and_white_rgb(distance )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
lowercase__ =get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
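Two illustrative probes of get_distance above: the origin belongs to the Mandelbrot set and never diverges, while a point far outside escapes on the very first step:

    print(get_distance(0.0, 0.0, 50))  # 1.0  (the loop runs all 50 steps, step ends at 49)
    print(get_distance(2.0, 2.0, 50))  # 0.0  (a*a + b*b > 4 already at step 0)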
| 216 | 0 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor( ProcessorMixin ):
"""simple docstring"""
feature_extractor_class = "Speech2TextFeatureExtractor"
tokenizer_class = "Speech2TextTokenizer"
def __init__( self , feature_extractor , tokenizer ):
super().__init__(feature_extractor , tokenizer )
self.current_processor = self.feature_extractor
self._in_target_context_manager = False
def __call__( self , *args , **kwargs ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*args , **kwargs )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
audio = kwargs.pop('''raw_speech''' )
else:
audio = kwargs.pop('''audio''' , None )
sampling_rate = kwargs.pop('''sampling_rate''' , None )
text = kwargs.pop('''text''' , None )
if len(args ) > 0:
audio = args[0]
args = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
if text is not None:
encodings = self.tokenizer(text , **kwargs )
if text is None:
return inputs
elif audio is None:
return encodings
else:
inputs['''labels'''] = encodings['''input_ids''']
return inputs
def batch_decode( self , *args , **kwargs ):
return self.tokenizer.batch_decode(*args , **kwargs )
def decode( self , *args , **kwargs ):
return self.tokenizer.decode(*args , **kwargs )
@contextmanager
def as_target_processor( self ):
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
self._in_target_context_manager = True
self.current_processor = self.tokenizer
yield
self.current_processor = self.feature_extractor
self._in_target_context_manager = False
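A short usage sketch for the processor above. It assumes the public facebook/s2t-small-librispeech-asr checkpoint (and its sentencepiece dependency) can be downloaded at run time, and the random waveform merely stands in for real audio:

    import numpy as np
    from transformers import Speech2TextProcessor

    processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
    audio = np.random.randn(16_000).astype(np.float32)  # stand-in for one second of 16 kHz audio
    # audio and transcription are handled in a single call; the text becomes `labels`
    inputs = processor(audio=audio, sampling_rate=16_000, text="hello world")
    print(sorted(inputs.keys()))  # e.g. ['attention_mask', 'input_features', 'labels']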
| 257 |
__UpperCAmelCase = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__UpperCAmelCase = [{"type": "code", "content": INSTALL_CONTENT}]
__UpperCAmelCase = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 257 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
lowerCamelCase : List[Any] = None
lowerCamelCase : int = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
lowerCamelCase : Optional[int] = {
"vocab_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
},
"tokenizer_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
},
}
lowerCamelCase : List[Any] = {
"google/fnet-base": 512,
"google/fnet-large": 512,
}
lowerCamelCase : Any = "▁"
class FNetTokenizerFast( PreTrainedTokenizerFast ):
'''simple docstring'''
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['''input_ids''', '''token_type_ids''']
slow_tokenizer_class = FNetTokenizer
def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=True , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
"""simple docstring"""
mask_token = (
AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
if isinstance(mask_token , str )
else mask_token
)
super().__init__(
vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
self.do_lower_case = do_lower_case
self.remove_space = remove_space
self.keep_accents = keep_accents
self.vocab_file = vocab_file
self.can_save_slow_tokenizer = False if not self.vocab_file else True
def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return cls + token_ids_0 + sep
return cls + token_ids_0 + sep + token_ids_1 + sep
def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(save_directory ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
return (out_vocab_file,)
| 204 |
def solution( power : int = 10_00 ) -> int:
'''simple docstring'''
n = 2**power
r = 0
while n:
r, n = r + n % 10, n // 10
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
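A small worked check of solution above: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26, while the default argument reproduces the well-known Project Euler 16 answer:

    print(solution(15))  # 26
    print(solution())    # 1366, the digit sum of 2**1000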
| 204 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
pipeline_class = DanceDiffusionPipeline
params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
required_optional_params = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
a_ = False
a_ = False
def get_dummy_components( self ):
torch.manual_seed(0 )
unet = UNet1DModel(
block_out_channels=(3_2, 3_2, 6_4) , extra_in_channels=1_6 , sample_size=5_1_2 , sample_rate=1_6_0_0_0 , in_channels=2 , out_channels=2 , flip_sin_to_cos=True , use_timestep_embedding=False , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
scheduler = IPNDMScheduler()
components = {
'unet': unet,
'scheduler': scheduler,
}
return components
def get_dummy_inputs( self , device , seed=0 ):
if str(device ).startswith('mps' ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def test_dance_diffusion( self ):
device = 'cpu' # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
pipe = DanceDiffusionPipeline(**components )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
inputs = self.get_dummy_inputs(device )
output = pipe(**inputs )
audio = output.audios
audio_slice = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
expected_slice = np.array([-0.72_65, 1.00_00, -0.83_88, 0.11_75, 0.94_98, -1.00_00] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def lowercase ( self : Union[str, Any] ) -> Tuple:
return super().test_save_load_local()
@skip_mps
def lowercase ( self : List[str] ) -> Dict:
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def lowercase ( self : str ) -> List[str]:
return super().test_save_load_optional_components()
@skip_mps
def lowercase ( self : List[Any] ) -> List[str]:
return super().test_attention_slicing_forward_pass()
def lowercase ( self : str ) -> int:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_dance_diffusion( self ):
device = torch_device
pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
generator = torch.manual_seed(0 )
output = pipe(generator=generator , num_inference_steps=1_0_0 , audio_length_in_s=4.0_96 )
audio = output.audios
audio_slice = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
expected_slice = np.array([-0.01_92, -0.02_31, -0.03_18, -0.00_59, 0.00_02, -0.00_20] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def test_dance_diffusion_fp16( self ):
device = torch_device
pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.float16 )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
generator = torch.manual_seed(0 )
output = pipe(generator=generator , num_inference_steps=1_0_0 , audio_length_in_s=4.0_96 )
audio = output.audios
audio_slice = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
expected_slice = np.array([-0.03_67, -0.04_88, -0.07_71, -0.05_25, -0.04_44, -0.03_41] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
| 369 |
UNIT_SYMBOL = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def length_conversion( value : float, from_type : str, to_type : str ) -> float:
from_sanitized = from_type.lower().strip('s' )
to_sanitized = to_type.lower().strip('s' )
from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized )
to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized )
if from_sanitized not in METRIC_CONVERSION:
msg = (
F"""Invalid 'from_type' value: {from_type!r}.\n"""
F"""Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}"""
)
raise ValueError(msg )
if to_sanitized not in METRIC_CONVERSION:
msg = (
F"""Invalid 'to_type' value: {to_type!r}.\n"""
F"""Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}"""
)
raise ValueError(msg )
from_exponent = METRIC_CONVERSION[from_sanitized]
to_exponent = METRIC_CONVERSION[to_sanitized]
exponent = 1
if from_exponent > to_exponent:
exponent = from_exponent - to_exponent
else:
exponent = -(to_exponent - from_exponent)
return value * pow(10, exponent )
if __name__ == "__main__":
from doctest import testmod
testmod()
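Two illustrative conversions with length_conversion above; both full unit names and abbreviations work because UNIT_SYMBOL normalizes the former (4 km = 4 x 10^(3-6) Mm = 0.004 Mm):

    print(length_conversion(4, "kilometer", "megametre"))  # 0.004
    print(length_conversion(1, "meter", "km"))             # 0.001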
| 207 | 0 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class UpperCAmelCase :
'''simple docstring'''
@staticmethod
def UpperCamelCase_ ( *A : Any ,**A : int ):
pass
def UpperCAmelCase ( a_ ) -> List[str]:
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
SCREAMING_SNAKE_CASE :List[Any] = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
snake_case_ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def UpperCamelCase_ ( self : int ,A : Optional[int] ,A : List[str] ,A : Union[str, Any] ):
__A = pipeline(
"document-question-answering" ,model=A ,tokenizer=A ,image_processor=A )
__A = INVOICE_URL
__A = list(zip(*apply_tesseract(load_image(A ) ,A ,"" ) ) )
__A = "What is the placebo?"
__A = [
{
"image": load_image(A ),
"question": question,
},
{
"image": image,
"question": question,
},
{
"image": image,
"question": question,
"word_boxes": word_boxes,
},
]
return dqa_pipeline, examples
def UpperCamelCase_ ( self : Optional[int] ,A : Any ,A : List[str] ):
__A = dqa_pipeline(A ,top_k=2 )
self.assertEqual(
A ,[
[
{"score": ANY(A ), "answer": ANY(A ), "start": ANY(A ), "end": ANY(A )},
{"score": ANY(A ), "answer": ANY(A ), "start": ANY(A ), "end": ANY(A )},
]
]
* 3 ,)
@require_torch
@require_detectron2
@require_pytesseract
def UpperCamelCase_ ( self : str ):
__A = pipeline("document-question-answering" ,model="hf-internal-testing/tiny-random-layoutlmv2" )
__A = INVOICE_URL
__A = "How many cats are there?"
__A = [
{"score": 0.00_01, "answer": "oy 2312/2019", "start": 38, "end": 39},
{"score": 0.00_01, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
]
__A = dqa_pipeline(image=A ,question=A ,top_k=2 )
self.assertEqual(nested_simplify(A ,decimals=4 ) ,A )
__A = dqa_pipeline({"image": image, "question": question} ,top_k=2 )
self.assertEqual(nested_simplify(A ,decimals=4 ) ,A )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
__A = "./tests/fixtures/tests_samples/COCO/000000039769.png"
__A = dqa_pipeline(image=A ,question=A ,top_k=2 )
self.assertEqual(A ,[] )
# We can optionally pass the words and bounding boxes directly
__A = "./tests/fixtures/tests_samples/COCO/000000039769.png"
__A = []
__A = []
__A = dqa_pipeline(image=A ,question=A ,words=A ,boxes=A ,top_k=2 )
self.assertEqual(A ,[] )
@slow
@require_torch
@require_detectron2
@require_pytesseract
def UpperCamelCase_ ( self : int ):
__A = pipeline(
"document-question-answering" ,model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" ,revision="9977165" ,)
__A = INVOICE_URL
__A = "What is the invoice number?"
__A = dqa_pipeline(image=A ,question=A ,top_k=2 )
self.assertEqual(
nested_simplify(A ,decimals=4 ) ,[
{"score": 0.99_44, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.00_09, "answer": "us-001", "start": 16, "end": 16},
] ,)
__A = dqa_pipeline({"image": image, "question": question} ,top_k=2 )
self.assertEqual(
nested_simplify(A ,decimals=4 ) ,[
{"score": 0.99_44, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.00_09, "answer": "us-001", "start": 16, "end": 16},
] ,)
__A = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] ,top_k=2 )
self.assertEqual(
nested_simplify(A ,decimals=4 ) ,[
[
{"score": 0.99_44, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.00_09, "answer": "us-001", "start": 16, "end": 16},
],
]
* 2 ,)
@slow
@require_torch
@require_detectron2
@require_pytesseract
def UpperCamelCase_ ( self : Tuple ):
__A = pipeline(
"document-question-answering" ,model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" ,revision="9977165" ,max_seq_len=50 ,)
__A = INVOICE_URL
__A = "What is the invoice number?"
__A = dqa_pipeline(image=A ,question=A ,top_k=2 )
self.assertEqual(
nested_simplify(A ,decimals=4 ) ,[
{"score": 0.99_74, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.99_48, "answer": "us-001", "start": 16, "end": 16},
] ,)
__A = dqa_pipeline({"image": image, "question": question} ,top_k=2 )
self.assertEqual(
nested_simplify(A ,decimals=4 ) ,[
{"score": 0.99_74, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.99_48, "answer": "us-001", "start": 16, "end": 16},
] ,)
__A = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] ,top_k=2 )
self.assertEqual(
nested_simplify(A ,decimals=4 ) ,[
[
{"score": 0.99_74, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.99_48, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 ,)
@slow
@require_torch
@require_pytesseract
@require_vision
def UpperCamelCase_ ( self : Optional[int] ):
__A = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" ,revision="3dc6de3" ,add_prefix_space=A )
__A = pipeline(
"document-question-answering" ,model="impira/layoutlm-document-qa" ,tokenizer=A ,revision="3dc6de3" ,)
__A = INVOICE_URL
__A = "What is the invoice number?"
__A = dqa_pipeline(image=A ,question=A ,top_k=2 )
self.assertEqual(
nested_simplify(A ,decimals=4 ) ,[
{"score": 0.42_51, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.08_19, "answer": "1110212019", "start": 23, "end": 23},
] ,)
__A = dqa_pipeline({"image": image, "question": question} ,top_k=2 )
self.assertEqual(
nested_simplify(A ,decimals=4 ) ,[
{"score": 0.42_51, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.08_19, "answer": "1110212019", "start": 23, "end": 23},
] ,)
__A = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] ,top_k=2 )
self.assertEqual(
nested_simplify(A ,decimals=4 ) ,[
[
{"score": 0.42_51, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.08_19, "answer": "1110212019", "start": 23, "end": 23},
]
]
* 2 ,)
__A = list(zip(*apply_tesseract(load_image(A ) ,A ,"" ) ) )
# This model should also work if `image` is set to None
__A = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} ,top_k=2 )
self.assertEqual(
nested_simplify(A ,decimals=4 ) ,[
{"score": 0.42_51, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.08_19, "answer": "1110212019", "start": 23, "end": 23},
] ,)
@slow
@require_torch
@require_pytesseract
@require_vision
def UpperCamelCase_ ( self : List[Any] ):
__A = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" ,revision="3dc6de3" ,add_prefix_space=A )
__A = pipeline(
"document-question-answering" ,model="impira/layoutlm-document-qa" ,tokenizer=A ,revision="3dc6de3" ,max_seq_len=50 ,)
__A = INVOICE_URL
__A = "What is the invoice number?"
__A = dqa_pipeline(image=A ,question=A ,top_k=2 )
self.assertEqual(
nested_simplify(A ,decimals=4 ) ,[
{"score": 0.99_99, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.99_98, "answer": "us-001", "start": 16, "end": 16},
] ,)
__A = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] ,top_k=2 )
self.assertEqual(
nested_simplify(A ,decimals=4 ) ,[
[
{"score": 0.99_99, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.99_98, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 ,)
__A = list(zip(*apply_tesseract(load_image(A ) ,A ,"" ) ) )
# This model should also work if `image` is set to None
__A = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} ,top_k=2 )
self.assertEqual(
nested_simplify(A ,decimals=4 ) ,[
{"score": 0.99_99, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.99_98, "answer": "us-001", "start": 16, "end": 16},
] ,)
@slow
@require_torch
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = pipeline(
"document-question-answering" ,model="naver-clova-ix/donut-base-finetuned-docvqa" ,tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) ,feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" ,)
__A = INVOICE_URL
__A = "What is the invoice number?"
__A = dqa_pipeline(image=A ,question=A ,top_k=2 )
self.assertEqual(nested_simplify(A ,decimals=4 ) ,[{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
def UpperCamelCase_ ( self : Optional[int] ):
pass
| 15 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
SCREAMING_SNAKE_CASE :Union[str, Any] = get_logger(__name__)
class UpperCAmelCase :
'''simple docstring'''
snake_case_ = "dummy_data"
snake_case_ = "datasets"
snake_case_ = False
def __init__( self : Optional[int] ,A : str ,A : str ,A : Union[Version, str] ,A : Optional[str] = None ,A : bool = False ,A : bool = True ,A : Optional[List[Callable]] = None ,):
__A = 0
__A = dataset_name
__A = cache_dir
__A = use_local_dummy_data
__A = config
# download_callbacks take a single url as input
__A = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
__A = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
__A = str(A )
# to be downloaded
__A = None
__A = None
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
if self._dummy_file is None:
__A = self.download_dummy_data()
return self._dummy_file
@property
def UpperCamelCase_ ( self : Optional[Any] ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" ,self.config.name ,self.version_name )
# structure is dummy / version_name
return os.path.join("dummy" ,self.version_name )
@property
def UpperCamelCase_ ( self : List[Any] ):
return os.path.join(self.dummy_data_folder ,"dummy_data.zip" )
def UpperCamelCase_ ( self : Tuple ):
__A = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
__A = cached_path(
A ,cache_dir=self.cache_dir ,extract_compressed_file=A ,force_extract=A )
return os.path.join(A ,self.dummy_file_name )
@property
def UpperCamelCase_ ( self : str ):
return os.path.join(self.datasets_scripts_dir ,self.dataset_name ,self.dummy_zip_file )
@property
def UpperCamelCase_ ( self : Any ):
if self._bucket_url is None:
__A = hf_github_url(self.dataset_name ,self.dummy_zip_file.replace(os.sep ,"/" ) )
return self._bucket_url
@property
def UpperCamelCase_ ( self : Tuple ):
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep ,"/" ).split("/" )[:-1] )
def UpperCamelCase_ ( self : List[str] ,A : List[Any] ,*A : Dict ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
__A = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
__A = self.dummy_file_name
# special case when data_url is a dict
if isinstance(A ,A ):
return self.create_dummy_data_dict(A ,A )
elif isinstance(A ,(list, tuple) ):
return self.create_dummy_data_list(A ,A )
else:
return self.create_dummy_data_single(A ,A )
def UpperCamelCase_ ( self : str ,A : List[Any] ,*A : List[Any] ):
return self.download_and_extract(A )
def UpperCamelCase_ ( self : List[str] ,A : List[str] ,A : Tuple ):
return self.download_and_extract(A )
def UpperCamelCase_ ( self : Any ,A : Any ,*A : Optional[Any] ,**A : List[str] ):
return path
def UpperCamelCase_ ( self : str ):
return {}
def UpperCamelCase_ ( self : int ,A : int ,A : Tuple ):
__A = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(A ,A ):
for single_url in single_urls:
download_callback(A )
else:
__A = single_urls
download_callback(A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(A ,A ):
__A = [os.path.join(A ,urllib.parse.quote_plus(Path(A ).name ) ) for x in single_urls]
else:
__A = single_urls
__A = os.path.join(A ,urllib.parse.quote_plus(Path(A ).name ) )
__A = value
# make sure that values are unique
if all(isinstance(A ,A ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
__A = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def UpperCamelCase_ ( self : Union[str, Any] ,A : str ,A : str ):
__A = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
__A = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" ,A ) ) for url in data_url )
__A = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
__A = [data_url[0]] * len(A )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__A = os.path.join(A ,urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
dummy_data_list.append(A )
return dummy_data_list
def UpperCamelCase_ ( self : str ,A : List[Any] ,A : Optional[Any] ):
for download_callback in self.download_callbacks:
download_callback(A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__A = os.path.join(A ,urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
if os.path.exists(A ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def UpperCamelCase_ ( self : int ):
pass
def UpperCamelCase_ ( self : Dict ):
pass
def UpperCamelCase_ ( self : Optional[Any] ,A : List[Any] ):
def _iter_archive_members(A : Optional[Any] ):
# this preserves the order of the members inside the ZIP archive
__A = Path(self.dummy_file ).parent
__A = path.relative_to(A )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
__A = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(A )
__A = Path(A )
__A = _iter_archive_members(A ) if self.use_local_dummy_data else path.rglob("*" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__") ):
yield file_path.relative_to(A ).as_posix(), file_path.open("rb" )
def UpperCamelCase_ ( self : List[Any] ,A : Any ):
if not isinstance(A ,A ):
__A = [paths]
for path in paths:
if os.path.isfile(A ):
if os.path.basename(A ).startswith((".", "__") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(A ):
if os.path.basename(A ).startswith((".", "__") ):
continue
dirnames.sort()
for filename in sorted(A ):
if filename.startswith((".", "__") ):
continue
yield os.path.join(A ,A )
| 15 | 1 |
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
'''simple docstring'''
def __init__( self , id_ ):
"""simple docstring"""
self.id = str(id_ )
self.key = None
self.pi = None
self.neighbors = []
self.edges = {} # {vertex:distance}
def __lt__( self , other ):
"""simple docstring"""
return self.key < other.key
def __repr__( self ):
"""simple docstring"""
return self.id
def add_neighbor( self , vertex ):
"""simple docstring"""
self.neighbors.append(vertex )
def add_edge( self , vertex , weight ):
"""simple docstring"""
self.edges[vertex.id] = weight
def connect( graph , a , b , edge ):
"""simple docstring"""
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , edge )
graph[b - 1].add_edge(graph[a - 1] , edge )
def prim( graph : list , root : Vertex ):
"""simple docstring"""
a = []
for u in graph:
u.key = math.inf
u.pi = None
root.key = 0
q = graph[:]
while q:
u = min(q )
q.remove(u )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
v.pi = u
v.key = u.edges[v.id]
for i in range(1 , len(graph ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def prim_heap( graph : list , root : Vertex ):
"""simple docstring"""
for u in graph:
u.key = math.inf
u.pi = None
root.key = 0
h = list(graph )
hq.heapify(h )
while h:
u = hq.heappop(h )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
v.pi = u
v.key = u.edges[v.id]
hq.heapify(h )
for i in range(1 , len(graph ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def test_vector() -> None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
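A tiny illustrative run of prim above; the three-vertex triangle below is invented for this example (vertex ids are created 0-based, while connect and the returned edge tuples use 1-based numbering):

    graph = [Vertex(i) for i in range(3)]
    connect(graph, 1, 2, 1)  # edge of weight 1 between vertices 1 and 2
    connect(graph, 2, 3, 2)  # edge of weight 2 between vertices 2 and 3
    connect(graph, 1, 3, 3)  # edge of weight 3 between vertices 1 and 3
    print(prim(graph, graph[0]))  # [(2, 1), (3, 2)], the two MST edges (total weight 3)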
| 315 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a):
'''simple docstring'''
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
warnings.warn(
'''The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ImageGPTImageProcessor instead.''' , __SCREAMING_SNAKE_CASE , )
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
| 315 | 1 |
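The second half of the row shows the deprecation-shim pattern: a legacy class name is kept as a thin subclass that warns on construction and forwards everything to the replacement. A generic sketch of that pattern (the class names below are placeholders, not the real transformers classes):

import warnings

class NewProcessor:
    def __init__(self, *args, **kwargs):
        pass

class OldFeatureExtractor(NewProcessor):
    # Deprecated alias kept for backward compatibility: constructing it
    # still works, but nudges users toward the replacement class.
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

OldFeatureExtractor()  # emits a FutureWarning, then behaves like NewProcessor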
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowercase : Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class __snake_case ( lowerCAmelCase ):
def __init__( self ,**snake_case ):
'''simple docstring'''
super().__init__(**snake_case )
if self.framework != "pt":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
# No specific FOR_XXX available yet
def __call__( self ,snake_case ,**snake_case ):
'''simple docstring'''
return super().__call__(snake_case ,**snake_case )
def _SCREAMING_SNAKE_CASE ( self ,**snake_case ):
'''simple docstring'''
lowercase : Union[str, Any] = {}
if "candidate_labels" in kwargs:
lowercase : List[str] = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
lowercase : Dict = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case=None ,snake_case="This is a sound of {}." ):
'''simple docstring'''
if isinstance(snake_case ,snake_case ):
if audio.startswith("""http://""" ) or audio.startswith("""https://""" ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
lowercase : Optional[Any] = requests.get(snake_case ).content
else:
with open(snake_case ,"""rb""" ) as f:
lowercase : Union[str, Any] = f.read()
if isinstance(snake_case ,snake_case ):
lowercase : int = ffmpeg_read(snake_case ,self.feature_extractor.sampling_rate )
if not isinstance(snake_case ,np.ndarray ):
raise ValueError("""We expect a numpy ndarray as input""" )
if len(audio.shape ) != 1:
raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""" )
lowercase : Dict = self.feature_extractor(
[audio] ,sampling_rate=self.feature_extractor.sampling_rate ,return_tensors="""pt""" )
lowercase : Tuple = candidate_labels
lowercase : Tuple = [hypothesis_template.format(snake_case ) for x in candidate_labels]
lowercase : Optional[Any] = self.tokenizer(snake_case ,return_tensors=self.framework ,padding=snake_case )
lowercase : Optional[Any] = [text_inputs]
return inputs
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : List[str] = model_inputs.pop("""candidate_labels""" )
lowercase : Dict = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] ,snake_case ):
lowercase : List[Any] = text_inputs[0]
else:
# Batching case.
lowercase : Dict = text_inputs[0][0]
lowercase : Optional[Any] = self.model(**snake_case ,**snake_case )
lowercase : Any = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_audio,
}
return model_outputs
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : List[Any] = model_outputs.pop("""candidate_labels""" )
lowercase : Any = model_outputs["""logits"""][0]
if self.framework == "pt":
lowercase : Any = logits.softmax(dim=0 )
lowercase : Tuple = probs.tolist()
else:
raise ValueError("""`tf` framework not supported.""" )
lowercase : Tuple = [
{"""score""": score, """label""": candidate_label}
for score, candidate_label in sorted(zip(snake_case ,snake_case ) ,key=lambda x : -x[0] )
]
return result
| 20 |
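The pipeline's postprocess step above reduces per-label logits to a ranked list of {"score", "label"} dicts via a softmax. The same computation in plain NumPy, assuming a 1-D logits vector (the helper name is mine):

import numpy as np

def rank_labels(logits, candidate_labels):
    # Convert per-label logits into probabilities and return
    # {"score", "label"} dicts sorted by descending score, mirroring
    # the pipeline's postprocess step.
    exp = np.exp(logits - np.max(logits))  # numerically stable softmax
    probs = exp / exp.sum()
    ranked = sorted(zip(probs.tolist(), candidate_labels), key=lambda x: -x[0])
    return [{"score": s, "label": l} for s, l in ranked]

print(rank_labels(np.array([2.0, 0.5, -1.0]), ["dog", "vacuum", "speech"]))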
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
a__ : List[str] = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class UpperCamelCase_ :
"""simple docstring"""
snake_case__ : Dict = PegasusConfig
snake_case__ : Union[str, Any] = {}
snake_case__ : Any = "gelu"
def __init__( self : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int=1_3 , UpperCAmelCase__ : Optional[int]=7 , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : List[Any]=9_9 , UpperCAmelCase__ : int=3_2 , UpperCAmelCase__ : Dict=5 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : List[Any]=3_7 , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : List[Any]=2_0 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : List[Any]=1 , UpperCAmelCase__ : Optional[Any]=0 , ) -> Any:
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = eos_token_id
__SCREAMING_SNAKE_CASE = pad_token_id
__SCREAMING_SNAKE_CASE = bos_token_id
def UpperCAmelCase_ ( self : Dict ) -> Dict:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
__SCREAMING_SNAKE_CASE = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
__SCREAMING_SNAKE_CASE = np.concatenate([input_ids, eos_tensor] , axis=1 )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__SCREAMING_SNAKE_CASE = prepare_pegasus_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return config, inputs_dict
def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] ) -> str:
__SCREAMING_SNAKE_CASE = 2_0
__SCREAMING_SNAKE_CASE = model_class_name(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model.encode(inputs_dict["input_ids"] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
__SCREAMING_SNAKE_CASE = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
__SCREAMING_SNAKE_CASE = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__SCREAMING_SNAKE_CASE = model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
__SCREAMING_SNAKE_CASE = model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = 2_0
__SCREAMING_SNAKE_CASE = model_class_name(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model.encode(inputs_dict["input_ids"] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
__SCREAMING_SNAKE_CASE = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__SCREAMING_SNAKE_CASE = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__SCREAMING_SNAKE_CASE = model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
__SCREAMING_SNAKE_CASE = model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , ):
'''simple docstring'''
if attention_mask is None:
__SCREAMING_SNAKE_CASE = np.not_equal(lowerCAmelCase_ , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
__SCREAMING_SNAKE_CASE = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class UpperCamelCase_ ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Tuple = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
snake_case__ : Union[str, Any] = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
snake_case__ : Tuple = True
snake_case__ : Union[str, Any] = False
snake_case__ : int = False
snake_case__ : List[Any] = False
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = FlaxPegasusModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__SCREAMING_SNAKE_CASE = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase__ )
@jax.jit
def encode_jitted(UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int=None , **UpperCAmelCase__ : int ):
return model.encode(input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
with self.subTest("JIT Enabled" ):
__SCREAMING_SNAKE_CASE = encode_jitted(**UpperCAmelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__SCREAMING_SNAKE_CASE = encode_jitted(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCAmelCase_ ( self : Tuple ) -> Any:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
__SCREAMING_SNAKE_CASE = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] ):
return model.decode(
decoder_input_ids=UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , encoder_outputs=UpperCAmelCase__ , )
with self.subTest("JIT Enabled" ):
__SCREAMING_SNAKE_CASE = decode_jitted(**UpperCAmelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__SCREAMING_SNAKE_CASE = decode_jitted(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def UpperCAmelCase_ ( self : Dict ) -> Tuple:
for model_class_name in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("google/pegasus-large" , from_pt=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = np.ones((1, 1) )
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@slow
def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]:
__SCREAMING_SNAKE_CASE = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" )
__SCREAMING_SNAKE_CASE = PegasusTokenizer.from_pretrained("google/pegasus-xsum" )
__SCREAMING_SNAKE_CASE = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
__SCREAMING_SNAKE_CASE = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
__SCREAMING_SNAKE_CASE = tokenizer(UpperCAmelCase__ , return_tensors="np" , truncation=UpperCAmelCase__ , max_length=5_1_2 , padding=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model.generate(**UpperCAmelCase__ , num_beams=2 ).sequences
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
assert tgt_text == decoded
| 54 | 0 |
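prepare_pegasus_inputs_dict above derives attention masks from pad tokens, special-casing position 0 of the decoder because Pegasus reuses the pad id as the decoder start token. A standalone sketch of that mask construction (the function name is mine):

import numpy as np

def build_masks(input_ids, decoder_input_ids, pad_token_id=0):
    # Encoder mask: 1 wherever the token is not padding.
    attention_mask = np.not_equal(input_ids, pad_token_id).astype(np.int8)
    # Decoder mask: position 0 (the start token) is always attended to,
    # even though it may reuse the pad id as decoder_start_token_id.
    decoder_attention_mask = np.concatenate(
        [
            np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
            np.not_equal(decoder_input_ids[:, 1:], pad_token_id).astype(np.int8),
        ],
        axis=-1,
    )
    return attention_mask, decoder_attention_mask

ids = np.array([[5, 7, 2, 0, 0]])
dec = np.array([[0, 5, 7, 2, 0]])
print(build_masks(ids, dec))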
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCAmelCase_ : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ : Optional[int] = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Tuple , __magic_name__ : Any , __magic_name__ : Dict=8 ) -> int:
"""simple docstring"""
UpperCamelCase :List[str] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCamelCase :Union[str, Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class _SCREAMING_SNAKE_CASE ( _a ):
def __init__( self : int , __lowerCamelCase : UNetaDConditionModel , __lowerCamelCase : DDPMScheduler , __lowerCamelCase : VQModel , ):
super().__init__()
self.register_modules(
unet=__lowerCamelCase , scheduler=__lowerCamelCase , movq=__lowerCamelCase , )
UpperCamelCase :Tuple = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _A ( self : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] ):
if latents is None:
UpperCamelCase :Any = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase , dtype=__lowerCamelCase )
else:
if latents.shape != shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
UpperCamelCase :Dict = latents.to(__lowerCamelCase )
UpperCamelCase :Tuple = latents * scheduler.init_noise_sigma
return latents
def _A ( self : str , __lowerCamelCase : Any=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
UpperCamelCase :Tuple = torch.device(F"""cuda:{gpu_id}""" )
UpperCamelCase :Any = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__lowerCamelCase , __lowerCamelCase )
def _A ( self : Optional[Any] , __lowerCamelCase : Dict=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
UpperCamelCase :Any = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=__lowerCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCamelCase :Tuple = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCamelCase , UpperCamelCase :Any = cpu_offload_with_hook(__lowerCamelCase , __lowerCamelCase , prev_module_hook=__lowerCamelCase )
# We'll offload the last model manually.
UpperCamelCase :int = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _A ( self : Optional[Any] ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__lowerCamelCase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__lowerCamelCase )
def __call__( self : List[str] , __lowerCamelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , __lowerCamelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , __lowerCamelCase : int = 512 , __lowerCamelCase : int = 512 , __lowerCamelCase : int = 100 , __lowerCamelCase : float = 4.0 , __lowerCamelCase : int = 1 , __lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCamelCase : Optional[torch.FloatTensor] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , ):
UpperCamelCase :List[Any] = self._execution_device
UpperCamelCase :Any = guidance_scale > 1.0
if isinstance(__lowerCamelCase , __lowerCamelCase ):
UpperCamelCase :List[str] = torch.cat(__lowerCamelCase , dim=0 )
UpperCamelCase :int = image_embeds.shape[0] * num_images_per_prompt
if isinstance(__lowerCamelCase , __lowerCamelCase ):
UpperCamelCase :List[Any] = torch.cat(__lowerCamelCase , dim=0 )
if do_classifier_free_guidance:
UpperCamelCase :List[Any] = image_embeds.repeat_interleave(__lowerCamelCase , dim=0 )
UpperCamelCase :Union[str, Any] = negative_image_embeds.repeat_interleave(__lowerCamelCase , dim=0 )
UpperCamelCase :Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__lowerCamelCase )
self.scheduler.set_timesteps(__lowerCamelCase , device=__lowerCamelCase )
UpperCamelCase :str = self.scheduler.timesteps
UpperCamelCase :Optional[Any] = self.unet.config.in_channels
UpperCamelCase , UpperCamelCase :Union[str, Any] = downscale_height_and_width(__lowerCamelCase , __lowerCamelCase , self.movq_scale_factor )
# create initial latent
UpperCamelCase :Optional[Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(__lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase :Any = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase :Optional[int] = {"""image_embeds""": image_embeds}
UpperCamelCase :Dict = self.unet(
sample=__lowerCamelCase , timestep=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , added_cond_kwargs=__lowerCamelCase , return_dict=__lowerCamelCase , )[0]
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase :List[str] = noise_pred.split(latents.shape[1] , dim=1 )
UpperCamelCase , UpperCamelCase :Tuple = noise_pred.chunk(2 )
UpperCamelCase , UpperCamelCase :str = variance_pred.chunk(2 )
UpperCamelCase :Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCamelCase :List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCamelCase , UpperCamelCase :Tuple = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase :Union[str, Any] = self.scheduler.step(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase , )[0]
# post-processing
UpperCamelCase :int = self.movq.decode(__lowerCamelCase , force_not_quantize=__lowerCamelCase )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
UpperCamelCase :Tuple = image * 0.5 + 0.5
UpperCamelCase :int = image.clamp(0 , 1 )
UpperCamelCase :Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCamelCase :Optional[Any] = self.numpy_to_pil(__lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCamelCase )
| 62 |
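downscale_height_and_width at the top of the row maps a requested image size to a valid latent size: divide by scale_factor**2, round up, then multiply back by scale_factor. A cleaned-up sketch with a worked example (the name latent_hw is mine); the movq decoder then upsamples the latent by scale_factor:

def latent_hw(height, width, scale_factor=8):
    # Round each dimension up to a multiple of scale_factor**2, then
    # return the corresponding latent-grid size times scale_factor.
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor

print(latent_hw(768, 768))  # (96, 96): a 768x768 request uses a 96x96 latent grid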
from string import ascii_lowercase, ascii_uppercase
def SCREAMING_SNAKE_CASE_ ( sentence : str ) -> str:
    """simple docstring"""
    if not sentence:
        return ""
    lower_to_upper :str = dict(zip(ascii_lowercase , ascii_uppercase ) )
    return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 62 | 1 |
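Assuming the naming fixes applied to the function above (parameter sentence, lookup table lower_to_upper built from the imported alphabets), a quick usage check; the .get(..., default) lookup leaves non-lowercase first characters untouched:

print(SCREAMING_SNAKE_CASE_("hello world"))  # Hello world
print(SCREAMING_SNAKE_CASE_("123 abc"))      # 123 abc
print(repr(SCREAMING_SNAKE_CASE_("")))       # ''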
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase : List[Any] = {
"""configuration_timesformer""": ["""TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimesformerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Tuple = [
"""TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimesformerModel""",
"""TimesformerForVideoClassification""",
"""TimesformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
lowercase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 99 |
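The row relies on transformers' _LazyModule, which defers heavy imports until an attribute is first accessed. A minimal stand-in showing the idea with importlib (a sketch of the pattern only, not the real _LazyModule implementation):

import importlib

class LazyModule:
    # Attribute access triggers the real import the first time a name
    # is needed; later accesses hit the cache.
    def __init__(self, import_map):
        self._import_map = import_map  # {attr_name: module_path}
        self._cache = {}

    def __getattr__(self, name):
        if name not in self._import_map:
            raise AttributeError(name)
        if name not in self._cache:
            module = importlib.import_module(self._import_map[name])
            self._cache[name] = getattr(module, name)
        return self._cache[name]

lazy = LazyModule({"OrderedDict": "collections"})
print(lazy.OrderedDict)  # collections is imported only on first access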
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase_ = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""ElectraTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 303 | 0 |
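Both halves of this row repeat the same availability-gating idiom: probe for an optional backend, raise a sentinel exception if it is missing, and only register the dependent exports in the else branch. A self-contained sketch (the exception and checker mirror the names imported above, but are redefined here so the example runs anywhere):

class OptionalDependencyNotAvailable(Exception):
    pass

def is_torch_available():
    try:
        import torch  # noqa: F401
        return True
    except ImportError:
        return False

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    TORCH_EXPORTS = []  # backend missing: expose nothing torch-specific
else:
    TORCH_EXPORTS = ["ElectraModel", "ElectraForMaskedLM"]

print(TORCH_EXPORTS)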
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
__UpperCAmelCase = imread(R"digital_image_processing/image_data/lena_small.jpg")
__UpperCAmelCase = cvtColor(img, COLOR_BGR2GRAY)
def A__ ( ):
SCREAMING_SNAKE_CASE_ = cn.convert_to_negative(__lowerCamelCase )
# assert negative_img array for at least one True
assert negative_img.any()
def A__ ( ):
with Image.open('''digital_image_processing/image_data/lena_small.jpg''' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(__lowerCamelCase, 1_10 ) ).startswith(
'''<PIL.Image.Image image mode=RGB size=100x100 at''' )
def A__ ( ):
SCREAMING_SNAKE_CASE_ = canny.gen_gaussian_kernel(9, sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def A__ ( ):
SCREAMING_SNAKE_CASE_ = imread('''digital_image_processing/image_data/lena_small.jpg''', 0 )
# assert ambiguous array for all == True
assert canny_img.all()
SCREAMING_SNAKE_CASE_ = canny.canny(__lowerCamelCase )
# assert canny array for at least one True
assert canny_array.any()
def A__ ( ):
assert gg.gaussian_filter(__lowerCamelCase, 5, sigma=0.9 ).all()
def A__ ( ):
# laplace diagonals
SCREAMING_SNAKE_CASE_ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
SCREAMING_SNAKE_CASE_ = conv.img_convolve(__lowerCamelCase, __lowerCamelCase ).astype(__lowerCamelCase )
assert res.any()
def A__ ( ):
assert med.median_filter(__lowerCamelCase, 3 ).any()
def A__ ( ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = sob.sobel_filter(__lowerCamelCase )
assert grad.any() and theta.any()
def A__ ( ):
SCREAMING_SNAKE_CASE_ = sp.make_sepia(__lowerCamelCase, 20 )
assert sepia.all()
def A__ ( __lowerCamelCase = "digital_image_processing/image_data/lena_small.jpg" ):
SCREAMING_SNAKE_CASE_ = bs.Burkes(imread(__lowerCamelCase, 1 ), 1_20 )
burkes.process()
assert burkes.output_img.any()
def A__ ( __lowerCamelCase = "digital_image_processing/image_data/lena_small.jpg", ):
SCREAMING_SNAKE_CASE_ = rs.NearestNeighbour(imread(__lowerCamelCase, 1 ), 4_00, 2_00 )
nn.process()
assert nn.output.any()
def A__ ( ):
SCREAMING_SNAKE_CASE_ = '''digital_image_processing/image_data/lena.jpg'''
# Reading the image and converting it to grayscale.
SCREAMING_SNAKE_CASE_ = imread(__lowerCamelCase, 0 )
# Test for get_neighbors_pixel function() return not None
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = image[x_coordinate][y_coordinate]
SCREAMING_SNAKE_CASE_ = lbp.get_neighbors_pixel(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
SCREAMING_SNAKE_CASE_ = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0, image.shape[0] ):
for j in range(0, image.shape[1] ):
SCREAMING_SNAKE_CASE_ = lbp.local_binary_value(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
assert lbp_image.any()
| 257 |
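The img_convolve test above exercises a 2-D convolution with a Laplacian-style kernel. A naive valid-mode implementation for reference (mine, not the repository's conv.img_convolve, which may differ in padding and dtype handling):

import numpy as np

def convolve2d(image, kernel):
    # Naive valid-mode 2-D convolution: slide the flipped kernel over
    # the image and sum the elementwise products at each position.
    kh, kw = kernel.shape
    h, w = image.shape
    flipped = kernel[::-1, ::-1]  # convolution flips the kernel
    out = np.zeros((h - kh + 1, w - kw + 1))
    for i in range(out.shape[0]):
        for j in range(out.shape[1]):
            out[i, j] = np.sum(image[i : i + kh, j : j + kw] * flipped)
    return out

img = np.arange(16, dtype=float).reshape(4, 4)
laplace = np.array([[0.25, 0.5, 0.25], [0.5, -3.0, 0.5], [0.25, 0.5, 0.25]])
print(convolve2d(img, laplace))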
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ =42
UpperCAmelCase_ =42
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
UpperCAmelCase_ =42
UpperCAmelCase_ =(16, 32, 96, 256)
UpperCAmelCase_ =jnp.floataa
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
SCREAMING_SNAKE_CASE_ = []
for i in range(len(self.block_out_channels ) - 1 ):
SCREAMING_SNAKE_CASE_ = self.block_out_channels[i]
SCREAMING_SNAKE_CASE_ = self.block_out_channels[i + 1]
SCREAMING_SNAKE_CASE_ = nn.Conv(
_A , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_A )
SCREAMING_SNAKE_CASE_ = nn.Conv(
_A , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_A )
SCREAMING_SNAKE_CASE_ = blocks
SCREAMING_SNAKE_CASE_ = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , _A ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = self.conv_in(_A )
SCREAMING_SNAKE_CASE_ = nn.silu(_A )
for block in self.blocks:
SCREAMING_SNAKE_CASE_ = block(_A )
SCREAMING_SNAKE_CASE_ = nn.silu(_A )
SCREAMING_SNAKE_CASE_ = self.conv_out(_A )
return embedding
@flax_register_to_config
class UpperCamelCase__ ( nn.Module , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ =32
UpperCAmelCase_ =4
UpperCAmelCase_ =(
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
UpperCAmelCase_ =False
UpperCAmelCase_ =(320, 640, 1_280, 1_280)
UpperCAmelCase_ =2
UpperCAmelCase_ =8
UpperCAmelCase_ =None
UpperCAmelCase_ =1_280
UpperCAmelCase_ =0.0
UpperCAmelCase_ =False
UpperCAmelCase_ =jnp.floataa
UpperCAmelCase_ =True
UpperCAmelCase_ =0
UpperCAmelCase_ ="rgb"
UpperCAmelCase_ =(16, 32, 96, 256)
def _UpperCamelCase ( self , _A ) -> FrozenDict:
# init input tensors
SCREAMING_SNAKE_CASE_ = (1, self.in_channels, self.sample_size, self.sample_size)
SCREAMING_SNAKE_CASE_ = jnp.zeros(_A , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ = jnp.ones((1,) , dtype=jnp.intaa )
SCREAMING_SNAKE_CASE_ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ = (1, 3, self.sample_size * 8, self.sample_size * 8)
SCREAMING_SNAKE_CASE_ = jnp.zeros(_A , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = jax.random.split(_A )
SCREAMING_SNAKE_CASE_ = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(_A , _A , _A , _A , _A )["params"]
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = self.block_out_channels
SCREAMING_SNAKE_CASE_ = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
SCREAMING_SNAKE_CASE_ = self.num_attention_heads or self.attention_head_dim
# input
SCREAMING_SNAKE_CASE_ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
SCREAMING_SNAKE_CASE_ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
SCREAMING_SNAKE_CASE_ = FlaxTimestepEmbedding(_A , dtype=self.dtype )
SCREAMING_SNAKE_CASE_ = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
SCREAMING_SNAKE_CASE_ = self.only_cross_attention
if isinstance(_A , _A ):
SCREAMING_SNAKE_CASE_ = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_A , _A ):
SCREAMING_SNAKE_CASE_ = (num_attention_heads,) * len(self.down_block_types )
# down
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = block_out_channels[0]
SCREAMING_SNAKE_CASE_ = nn.Conv(
_A , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_A )
for i, down_block_type in enumerate(self.down_block_types ):
SCREAMING_SNAKE_CASE_ = output_channel
SCREAMING_SNAKE_CASE_ = block_out_channels[i]
SCREAMING_SNAKE_CASE_ = i == len(_A ) - 1
if down_block_type == "CrossAttnDownBlock2D":
SCREAMING_SNAKE_CASE_ = FlaxCrossAttnDownBlockaD(
in_channels=_A , out_channels=_A , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
SCREAMING_SNAKE_CASE_ = FlaxDownBlockaD(
in_channels=_A , out_channels=_A , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_A )
for _ in range(self.layers_per_block ):
SCREAMING_SNAKE_CASE_ = nn.Conv(
_A , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_A )
if not is_final_block:
SCREAMING_SNAKE_CASE_ = nn.Conv(
_A , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_A )
SCREAMING_SNAKE_CASE_ = down_blocks
SCREAMING_SNAKE_CASE_ = controlnet_down_blocks
# mid
SCREAMING_SNAKE_CASE_ = block_out_channels[-1]
SCREAMING_SNAKE_CASE_ = FlaxUNetMidBlockaDCrossAttn(
in_channels=_A , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
SCREAMING_SNAKE_CASE_ = nn.Conv(
_A , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , _A , _A , _A , _A , _A = 1.0 , _A = True , _A = False , ) -> Union[FlaxControlNetOutput, Tuple]:
SCREAMING_SNAKE_CASE_ = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
SCREAMING_SNAKE_CASE_ = jnp.flip(_A , axis=1 )
# 1. time
if not isinstance(_A , jnp.ndarray ):
SCREAMING_SNAKE_CASE_ = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_A , jnp.ndarray ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE_ = timesteps.astype(dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ = jnp.expand_dims(_A , 0 )
SCREAMING_SNAKE_CASE_ = self.time_proj(_A )
SCREAMING_SNAKE_CASE_ = self.time_embedding(_A )
# 2. pre-process
SCREAMING_SNAKE_CASE_ = jnp.transpose(_A , (0, 2, 3, 1) )
SCREAMING_SNAKE_CASE_ = self.conv_in(_A )
SCREAMING_SNAKE_CASE_ = jnp.transpose(_A , (0, 2, 3, 1) )
SCREAMING_SNAKE_CASE_ = self.controlnet_cond_embedding(_A )
sample += controlnet_cond
# 3. down
SCREAMING_SNAKE_CASE_ = (sample,)
for down_block in self.down_blocks:
if isinstance(_A , _A ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = down_block(_A , _A , _A , deterministic=not train )
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = down_block(_A , _A , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
SCREAMING_SNAKE_CASE_ = self.mid_block(_A , _A , _A , deterministic=not train )
# 5. contronet blocks
SCREAMING_SNAKE_CASE_ = ()
for down_block_res_sample, controlnet_block in zip(_A , self.controlnet_down_blocks ):
SCREAMING_SNAKE_CASE_ = controlnet_block(_A )
controlnet_down_block_res_samples += (down_block_res_sample,)
SCREAMING_SNAKE_CASE_ = controlnet_down_block_res_samples
SCREAMING_SNAKE_CASE_ = self.controlnet_mid_block(_A )
# 6. scaling
SCREAMING_SNAKE_CASE_ = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=_A , mid_block_res_sample=_A )
| 257 | 1 |
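The ControlNet forward pass above ends by multiplying every residual handed to the UNet by conditioning_scale, which is how the strength of the control signal is dialed up or down. The scaling step in isolation, on toy arrays:

import numpy as np

# Stand-ins for the per-resolution down-block residuals and the mid-block
# residual produced by the zero-initialized ControlNet output convolutions.
down_block_res_samples = [np.ones((1, 2, 2, 4)), np.ones((1, 1, 1, 8))]
mid_block_res_sample = np.ones((1, 1, 1, 8))
conditioning_scale = 0.5

down_block_res_samples = [s * conditioning_scale for s in down_block_res_samples]
mid_block_res_sample = mid_block_res_sample * conditioning_scale
print(down_block_res_samples[0].max(), mid_block_res_sample.max())  # 0.5 0.5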