code stringlengths 87 55.2k | code_codestyle int64 0 349 | style_context stringlengths 135 49.1k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    """Round-trip and behaviour tests for ``VisionTextDualEncoderProcessor``
    built from a BERT tokenizer and a ViT image processor."""

    def setUp(self):
        """Write a toy BERT vocab and a ViT image-processor config into a temp dir."""
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        """Slow BERT tokenizer loaded from the temp vocab, with optional overrides."""
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        """ViT image processor loaded from the temp config, with optional overrides."""
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random 30x400 RGB PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        # channels-first -> channels-last before converting to PIL
        return [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

    def test_save_load_pretrained_default(self):
        """save_pretrained / from_pretrained preserves both sub-components."""
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        """Kwargs passed to from_pretrained override the saved component settings."""
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        """processor(images=...) matches calling the image processor directly."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        """processor(text=...) matches calling the tokenizer directly."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        """Joint call returns text + image keys; calling with nothing raises."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        """processor.batch_decode delegates to the tokenizer."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        """Output keys of a joint call equal processor.model_input_names."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 41 |
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
A__: Union[str, Any] = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale an absolute pixel box ``[x0, y0, x1, y1]`` to the 0-1000 range
    used by LayoutLM-style models.

    ``width``/``height`` are the full image dimensions; results are truncated
    to int, matching the upstream convention.
    """
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Run Tesseract OCR on a document image.

    Returns ``(words, normalized_boxes)`` where each box is in the 0-1000
    LayoutLM coordinate system (see ``normalize_box``).
    """
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class A__(BaseImageProcessor):
    """Document image processor (LayoutLM family): optional resize, optional
    Tesseract OCR, RGB->BGR channel flip.

    NOTE(review): the original class/base names were mangled placeholders in
    this dump; ``BaseImageProcessor`` (imported above) is the evident base.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image to ``size`` (height/width dict)."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        # `resize` here is the module-level transform, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess a batch of images; returns a BatchFeature with
        ``pixel_values`` plus ``words``/``boxes`` when OCR is applied."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 276 | 0 |
'''simple docstring'''
import math
class SelfOrganizingMap:
    """A minimal self-organizing map with exactly two cluster weight vectors."""

    def get_winner(self, weights, sample):
        """Return 0 or 1: the index of the weight vector closest to *sample*
        by squared Euclidean distance (smaller distance wins)."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow(sample[i] - weights[0][i], 2)
            d1 += math.pow(sample[i] - weights[1][i], 2)
        return 0 if d0 < d1 else 1

    def update(self, weights, sample, j, alpha):
        """Move winning vector *j* toward *sample* with learning rate *alpha*.

        Mutates and returns *weights*.
        """
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def main() -> None:
    """Train the toy self-organizing map and classify one test sample."""
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
| 42 |
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    """Fetch one Hacker News item as a dict from the public Firebase API."""
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()
def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Return the current top *max_stories* Hacker News stories as dicts."""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]
def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    """Render the top stories as a markdown bullet list of links."""
    stories = hackernews_top_stories(max_stories)
    # Each story dict is expected to carry "title" and "url" keys.
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
| 276 | 0 |
from math import pi
def arc_length(angle: float, radius: float) -> float:
    """Return the length of a circular arc of *angle* degrees on a circle
    of the given *radius*.

    >>> arc_length(90, 10)
    15.707963267948966
    """
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
| 43 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of the VQ model's ``encode``: continuous latents before quantization."""

    # Encoded latent representation, shape (batch, latent_channels, h, w).
    latents: torch.FloatTensor
class VQModel(ModelMixin, ConfigMixin):
    """VQ-VAE model: Encoder -> vector quantization -> Decoder.

    NOTE(review): class/attribute names were mangled placeholders in this dump
    (e.g. ``nn.Convad``); restored to the evident ``nn.Conv2d`` wiring.
    """

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        # 1x1 convs map between the encoder latent space and the codebook space.
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        """Encode an image batch to continuous latents (no quantization yet)."""
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        """Quantize latents (unless forced off) and decode them to an image."""
        # also go through quantization layer
        if not force_not_quantize:
            quant, _, _ = self.quantize(h)
        else:
            quant = h
        hidden = self.post_quant_conv(quant)
        dec = self.decoder(hidden, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(
        self, sample: torch.FloatTensor, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        """Full autoencoding pass: encode, quantize, decode."""
        h = self.encode(sample).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
| 276 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : Union[str, Any] = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[int] = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
_a : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 44 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    """Builds a tiny EsmConfig plus random inputs and asserts output shapes."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random ids/mask/labels sized from the tester's hyper-parameters."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test-suite wiring for the ESM model family."""

    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        """Positions are offset by padding_idx + 1; pad tokens keep padding_idx."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        """Same offset rule when positions are derived from inputs_embeds."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_outputs_equivalence(self):
        # NOTE(review): the overridden method name was mangled in this dump;
        # confirm which common test this skip is meant to disable.
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    """Numeric regression tests against the pretrained facebook/esm2_t6_8M_UR50D checkpoint."""

    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 276 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
lowercase_ = logging.get_logger(__name__)
class DPTFeatureExtractor(DPTImageProcessor):
    """Deprecated alias of `DPTImageProcessor`, kept only for backward compatibility.

    The class must be named ``DPTFeatureExtractor`` (it previously carried an
    obfuscated name, which defeated the whole purpose of this shim) and its base
    is the ``DPTImageProcessor`` imported above.
    """

    def __init__(self, *args, **kwargs) -> None:
        # The previous code passed `_a` (the *args tuple) as the warning
        # category, which raises TypeError at call time; a deprecation shim
        # should emit a FutureWarning.
        warnings.warn(
            '''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use DPTImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 45 |
'''simple docstring'''
from math import isqrt
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime (trial division up to its integer sqrt)."""
    if number < 2:
        # 0, 1 and negatives are not prime; the bare ``all(...)`` over an empty
        # range would otherwise report them as prime.
        return False
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below ``max_prime`` that are a difference of consecutive
    cubes, i.e. of the form (k+1)^3 - k^3 = 3*k^2 + 3*k + 1.

    The previous version named both functions ``SCREAMING_SNAKE_CASE_`` while
    the bodies called ``is_prime``/``solution`` — a guaranteed NameError.
    """
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # 2^3 - 1^3, the first cube difference
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index  # next difference of consecutive cubes
    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
| 276 | 0 |
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:

    class Image:
        """Stand-in so module-level references to ``Image`` resolve when the
        vision extra is not installed. (The previous stub used ``*lowercase,
        **lowercase`` — duplicate argument names, a SyntaxError — and was not
        named ``Image``, so ``Image.open`` in the tests below would fail.)"""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class lowercase ( unittest.TestCase ):
    """Tests for the zero-shot image-classification pipeline.

    Fixes: the four test methods all shared the name ``_snake_case`` (so only
    the last survived in the class and none were collected by their intended
    names), the image/result locals referenced the undefined name ``lowercase``,
    and ``ANY(lowercase)`` passed the test class where ``ANY(str)`` was meant.
    """

    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        # Scores tie at ~1/3, so with batching only the *type* of each label is
        # stable — hence ANY(str) rather than concrete labels.
        expected_batch = [
            [
                {"score": 0.333, "label": ANY(str)},
                {"score": 0.333, "label": ANY(str)},
                {"score": 0.333, "label": ANY(str)},
            ]
        ] * 5
        self.assertEqual(nested_simplify(output), expected_batch)

    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        expected_batch = [
            [
                {"score": 0.333, "label": ANY(str)},
                {"score": 0.333, "label": ANY(str)},
                {"score": 0.333, "label": ANY(str)},
            ]
        ] * 5
        self.assertEqual(nested_simplify(output), expected_batch)

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
| 46 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
# Emit a one-time deprecation notice for this legacy import path; stacklevel=3
# points the warning at the user's import statement rather than this shim.
deprecate(
    '''stable diffusion controlnet''',
    '''0.22.0''',
    '''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
    standard_warn=False,
    stacklevel=3,
)
| 276 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import scaffolding: `_import_structure` maps submodule name -> public
# symbols. The previous version bound every piece (and the final `_LazyModule`)
# to a throwaway `lowerCamelCase` variable, so `_import_structure` was
# undefined at use and the lazy module was never installed into `sys.modules`.
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this package module with the lazy proxy so attribute access
    # triggers the deferred submodule imports declared above.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 47 |
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        # `i` is the comprehension variable (previously the undefined
        # `_UpperCAmelCase`, a NameError at collection time).
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    """_distribute_shards splits `num_shards` into at most `max_num_jobs` ranges."""
    # Parameter names must match the parametrize argnames string exactly.
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    """_split_gen_kwargs distributes list-valued kwargs over at most `max_num_jobs` jobs."""
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        # Two list kwargs of different lengths must raise.
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    """_number_of_shards_in_gen_kwargs returns the shard count or raises on mismatch."""
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 276 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ]
)
class UpperCamelCase__ (unittest.TestCase):
    """SageMaker model-parallel GLUE training test.

    Fixes: every method was previously named ``_lowercase`` (so later
    definitions shadowed earlier ones), the test body called the nonexistent
    ``self.create_estimator``/``save_results_as_csv``, and the undefined names
    ``instance_count``, ``name_extension``, ``job_name`` and ``outfile`` were
    referenced while the real values were bound to throwaway locals.
    """

    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        """Build a HuggingFace estimator configured for smdistributed model parallelism."""
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        """Export the metrics of a finished training job next to the test artifacts."""
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 48 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Canonical checkpoint -> hosted config file. (The previous code bound both the
# logger and this dict to the same name `A__`, clobbering the logger.)
ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class A__ ( UpperCAmelCase__ ):
    """Configuration for RoCBert models (shape/pronunciation-aware BERT variant).

    Fixes: the previous ``__init__`` gave every parameter the same name
    (``SCREAMING_SNAKE_CASE`` — a SyntaxError) and assigned every value to a
    local ``_a`` instead of an instance attribute, so the config stored nothing.
    """

    # Key used by PretrainedConfig / auto classes to identify the model family.
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        # Forward remaining kwargs (bos/eos ids, etc.) to the base config.
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 276 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Canonical checkpoint -> hosted config file. (The previous code bound both the
# logger and this dict to the same name `__snake_case`, clobbering the logger.)
MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
    '''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class _A ( __UpperCAmelCase ):
    """Configuration for MarkupLM models.

    Fixes: the previous ``__init__`` gave every parameter the same name
    (``__SCREAMING_SNAKE_CASE`` — a SyntaxError) and bound every value to a
    local ``__a`` instead of instance attributes, so the config stored nothing.
    """

    # Key used by PretrainedConfig / auto classes to identify the model family.
    model_type = '''markuplm'''

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 49 |
'''simple docstring'''
class Graph:
    """Undirected weighted graph on adjacency maps, with Borůvka's MST.

    Fixes: the two classes were both named ``A__`` while the bodies referenced
    ``Graph``, ``Graph.UnionFind`` and ``Graph.build`` (NameErrors), and several
    mutations (``distinct_weight``, ``__str__``, ``union``) bound their results
    to a throwaway ``_a`` instead of the intended target.
    """

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Add a vertex if it is not already present."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Add an undirected weighted edge (self-loops are ignored)."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Make all edge weights distinct while preserving their sorted order.

        Borůvka's algorithm requires distinct weights to break ties safely.
        """
        edges = self.get_edges()
        # Drop the reversed duplicate of every undirected edge.
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        # Bump any weight that does not strictly exceed its predecessor.
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        """Render every directed adjacency entry as `head -> tail == weight`."""
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Return all directed (tail, head, weight) tuples (each edge twice)."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Construct a Graph from vertex and (head, tail, weight) edge lists."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set with path compression and union by rank."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            """Create a singleton set for `item` (idempotent)."""
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            """Return the representative of `item`'s set (auto-creating it)."""
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            """Merge the two sets and return the surviving root."""
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Return the minimum spanning tree of `graph` (distinct weights assumed)."""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            # Cheapest outgoing edge per component root (-1 = none found yet).
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            # Drop the reversed duplicate of every undirected edge.
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
| 276 | 0 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def test_send_file_running_as_expected(file, sock):
    """Mock the socket and file layers and verify send_file drives both correctly.

    `patch` decorators inject mocks bottom-up: the `builtins.open` mock arrives
    first (`file`), then the `socket.socket` mock (`sock`). The previous version
    declared two parameters with the same name (a SyntaxError) and referenced
    `conn`/`sock`/`file` without ever binding them.
    """
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])  # one chunk, then EOF
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)
    # ===== invoke =====
    send_file(filename='mytext.txt', testing=True)
    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()
    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()
    conn.send.assert_called_once()
    conn.close.assert_called_once()
    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
| 50 |
'''simple docstring'''
from datetime import datetime

import requests
from bs4 import BeautifulSoup  # fix: BeautifulSoup lives in `bs4`, not `bsa`

if __name__ == "__main__":
    url = input('''Enter image url: ''').strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, '''html.parser''')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, '''wb''') as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
| 276 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    """Builds processor kwargs and sizes for the DonutImageProcessor tests.

    Fixes: the previous ``__init__`` gave every parameter the same name
    (``_snake_case`` — a SyntaxError), and the class carried an obfuscated name
    while the test class below instantiates ``DonutImageProcessingTester``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        # Default target size matches the checkpoint config used in the tests.
        self.size = size if size is not None else {'''height''': 18, '''width''': 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate a DonutImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class __snake_case(a, unittest.TestCase):
    """Tests for DonutImageProcessor: attribute presence, size parsing, and
    __call__ over PIL, numpy and torch inputs.

    Method names restored (setUp / test_*): in the scrambled source every
    method was called `lowerCamelCase`, so only the last definition survived
    and unittest discovered no tests at all.
    """

    # Class under test; None when vision dependencies are unavailable.
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        # unittest fixture hook: must be stored on self so the tests can reach it
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, '''do_resize'''))
        self.assertTrue(hasattr(image_processing, '''size'''))
        self.assertTrue(hasattr(image_processing, '''do_thumbnail'''))
        self.assertTrue(hasattr(image_processing, '''do_align_long_axis'''))
        self.assertTrue(hasattr(image_processing, '''do_pad'''))
        self.assertTrue(hasattr(image_processing, '''do_normalize'''))
        self.assertTrue(hasattr(image_processing, '''image_mean'''))
        self.assertTrue(hasattr(image_processing, '''image_std'''))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'''height''': 18, '''width''': 20})

        # an int size becomes a square
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'''height''': 42, '''width''': 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {'''height''': 84, '''width''': 42})

    def test_batch_feature(self):
        # intentionally empty: batching is covered by the three call tests below
        pass

    @is_flaky()
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ),
        )
| 51 |
'''simple docstring'''
# Cell injected at the top of every auto-generated notebook: installs the
# libraries the notebook needs (comments intentionally in Italian — docs text).
INSTALL_CONTENT = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

# First cells of every generated notebook.
notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]

# Doc-example placeholders that `black` must leave untouched when reformatting.
black_avoid_patterns = {
    '''{processor_class}''': '''FakeProcessorClass''',
    '''{model_class}''': '''FakeModelClass''',
    '''{object_class}''': '''FakeObjectClass''',
}

# Backward-compatible alias: the scrambled source bound all three objects above
# to the single name `A__` (so `INSTALL_CONTENT` was undefined — NameError);
# only the final binding was observable, which is preserved here.
A__ = black_avoid_patterns
| 276 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class A__(Dataset):
    """Custom Dataset wrapping language-modeling sequences.

    Each sample is retrieved by indexing the array of token_ids and their
    corresponding lengths.

    Args:
        params: namespace with `max_model_input_size`, `mlm`,
            `special_tok_ids` (dict) and `is_master`.
        data: list of 1-D integer numpy arrays (token id sequences).

    The scrambled source named the base class `__snake_case` (undefined — the
    actual import is `Dataset`), gave every helper the same name
    `__UpperCamelCase`, and dropped all attribute assignments; restored here.
    """

    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Sanity check: lengths must describe token_ids exactly."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Split sequences longer than max_model_input_size into chunks,
        re-adding the start/end special tokens to every chunk."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(seq, n):
            # slice `seq` into consecutive chunks of at most n tokens
            return [seq[i : i + n] for i in range(0, len(seq), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                # max_len - 2 leaves room for the cls/sep tokens added below
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)
                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(s) for s in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Drop very short (<=11 token) sequences. The threshold could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Drop sequences in which >=50% of the tokens are the unknown token."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Log corpus statistics (master process only)."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Collate: pad every sequence of the batch to the batch maximum and
        return (token_ids, lengths) as torch tensors."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(lengths)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
| 52 |
'''simple docstring'''
# Doomsday "anchor" values per month (Conway's Doomsday algorithm); the first
# two months differ between leap and non-leap years.
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: '''Sunday''',
    1: '''Monday''',
    2: '''Tuesday''',
    3: '''Wednesday''',
    4: '''Thursday''',
    5: '''Friday''',
    6: '''Saturday''',
}


def SCREAMING_SNAKE_CASE_(year: int, month: int, day: int) -> str:
    """Return the week-day name for a date via Conway's Doomsday algorithm.

    >>> SCREAMING_SNAKE_CASE_(2020, 10, 24)
    'Saturday'
    >>> SCREAMING_SNAKE_CASE_(2017, 10, 24)
    'Tuesday'
    """
    # NOTE: asserts are stripped under `python -O`; kept to match the original contract.
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # A year is NOT leap when it is not divisible by 4, or when it is a
    # centurial year not divisible by 400. The scrambled source tested
    # `year % 400 == 0` here, which misclassified years like 2000.
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 276 | 0 |
'''simple docstring'''
from math import factorial
class snake_case:
    """Dual number a + b1*E1 + b2*E2 + ... for forward automatic differentiation.

    `real` holds the value; `duals[k]` is the coefficient of E(k+1), i.e. the
    (k+1)-th derivative divided by (k+1)!.
    """

    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            # rank = number of dual components, each initialised to 1
            self.duals = [1] * rank
        else:
            # an explicit list of dual coefficients
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def _lowerCamelCase(self):
        """Return a copy with trailing zero dual coefficients dropped."""
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return snake_case(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, snake_case):
            return snake_case(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        # Pad the shorter coefficient list with 0 — a missing dual term is zero.
        # The scrambled source compared len() of the same list and padded with 1,
        # which corrupted derivatives whenever operand ranks differed.
        if len(s_dual) > len(o_dual):
            o_dual.extend([0] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([0] * (len(o_dual) - len(s_dual)))
        new_duals = [s + o for s, o in zip(s_dual, o_dual)]
        return snake_case(self.real + other.real, new_duals)

    # addition is commutative, so reflected add reuses __add__
    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, snake_case):
            new_duals = [i * other for i in self.duals]
            return snake_case(self.real * other, new_duals)
        # Polynomial-style product of the dual parts; cross terms of
        # E(i+1)*E(j+1) land on E(i+j+2), hence index i + j + 1.
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return snake_case(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, snake_case):
            new_duals = [i / other for i in self.duals]
            return snake_case(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, snake_case):
            new_duals = [i // other for i in self.duals]
            return snake_case(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError('power must be a positive integer')
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def lowercase__(func, position, order):
    """Return the `order`-th derivative of `func` evaluated at `position`.

    >>> lowercase__(lambda y: y**2 * y**4, 9, 2)
    196830
    """
    if not callable(func):
        raise ValueError('differentiate() requires a function as input for func')
    if not isinstance(position, (float, int)):
        raise ValueError('differentiate() requires a float as input for position')
    if not isinstance(order, int):
        raise ValueError('differentiate() requires an int as input for order')
    d = snake_case(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    # duals[k] is f^(k+1)(position) / (k+1)!, so multiply back by order!
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        """Demo function: f(y) = y**6, written as y**2 * y**4."""
        return y**2 * y**4

    # The scrambled source called the undefined names `differentiate` and `f`
    # at module import time; the differentiation entry point here is
    # `lowercase__`, and the demo now only runs when executed as a script.
    print(lowercase__(f, 9, 2))
| 53 |
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    """Result of `bwt_transform`: the transformed string plus the rotation
    index required to invert the transform."""

    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    """Return every cyclic rotation of `s`.

    >>> all_rotations("abc")
    ['abc', 'bca', 'cab']
    """
    if not isinstance(s, str):
        raise TypeError("""The parameter s type must be str.""")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    """Burrows-Wheeler transform of `s`.

    >>> bwt_transform("^BANANA") == {'bwt_string': 'BNN^AAA', 'idx_original_string': 6}
    True
    """
    if not isinstance(s, str):
        raise TypeError("""The parameter s type must be str.""")
    if not s:
        raise ValueError("""The parameter s must not be empty.""")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Invert the Burrows-Wheeler transform.

    >>> reverse_bwt("BNN^AAA", 6)
    '^BANANA'
    """
    if not isinstance(bwt_string, str):
        raise TypeError("""The parameter bwt_string type must be str.""")
    if not bwt_string:
        raise ValueError("""The parameter bwt_string must not be empty.""")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            """The parameter idx_original_string type must be int or passive"""
            """ of cast to int."""
        )
    if idx_original_string < 0:
        raise ValueError("""The parameter idx_original_string must not be lower than 0.""")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            """The parameter idx_original_string must be lower than""" """ len(bwt_string)."""
        )

    # Repeatedly prepend the BWT column and sort: after len(bwt_string)
    # iterations every full rotation has been reconstructed in order.
    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = '''Provide a string that I will generate its BWT transform: '''
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result['''bwt_string'''], result['''idx_original_string'''])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
| 276 | 0 |
"""simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
# One-to-one key renames, as (stable-diffusion name, HF Diffusers name).
unet_conversion_map = [
    ('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''),
    ('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''),
    ('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''),
    ('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''),
    ('''input_blocks.0.0.weight''', '''conv_in.weight'''),
    ('''input_blocks.0.0.bias''', '''conv_in.bias'''),
    ('''out.0.weight''', '''conv_norm_out.weight'''),
    ('''out.0.bias''', '''conv_norm_out.bias'''),
    ('''out.2.weight''', '''conv_out.weight'''),
    ('''out.2.bias''', '''conv_out.bias'''),
]

# Sub-string renames applied only inside resnet blocks, (sd, HF).
unet_conversion_map_resnet = [
    ('''in_layers.0''', '''norm1'''),
    ('''in_layers.2''', '''conv1'''),
    ('''out_layers.0''', '''norm2'''),
    ('''out_layers.3''', '''conv2'''),
    ('''emb_layers.1''', '''time_emb_proj'''),
    ('''skip_connection''', '''conv_shortcut'''),
]

unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = '''mid_block.attentions.0.'''
sd_mid_atn_prefix = '''middle_block.1.'''
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))


def convert_unet_state_dict(unet_state_dict):
    """Rename HF Diffusers UNet keys to original stable-diffusion keys.

    Buyer beware: this is a *brittle* function — correct output requires that
    every fixed key from `unet_conversion_map` is present in the input dict.
    """
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ('''nin_shortcut''', '''conv_shortcut'''),
    ('''norm_out''', '''conv_norm_out'''),
    ('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ('''norm.''', '''group_norm.'''),
    ('''q.''', '''query.'''),
    ('''k.''', '''key.'''),
    ('''v.''', '''value.'''),
    ('''proj_out.''', '''proj_attn.'''),
]


def reshape_weight_for_sd(w):
    """Reshape HF linear attention weights (out, in) to SD 1x1-conv weights
    (out, in, 1, 1)."""
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    """Rename HF Diffusers VAE keys to stable-diffusion keys, reshaping the
    mid-block attention weights back into 1x1 convolutions."""
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"""mid.attn_1.{weight_name}.weight""" in k:
                print(f"""Reshaping {k} for SD format""")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ('''resblocks.''', '''text_model.encoder.layers.'''),
    ('''ln_1''', '''layer_norm1'''),
    ('''ln_2''', '''layer_norm2'''),
    ('''.c_fc.''', '''.fc1.'''),
    ('''.c_proj.''', '''.fc2.'''),
    ('''.attn''', '''.self_attn'''),
    ('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
    ('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
    ('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
# escaped-HF-substring -> SD-substring, used as a regex replacement table
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile('''|'''.join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {'''q''': 0, '''k''': 1, '''v''': 2}


def convert_text_enc_state_dict_vaa(text_enc_dict):
    """Convert an HF (v2 / OpenCLIP-style) text-encoder state dict to SD
    format, fusing the separate q/k/v projections into single in_proj tensors.
    """
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]  # single char: 'q', 'k' or 'v'
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        # the scrambled source renamed the lambda parameter away but kept `m`
        # in the body, raising NameError on every substitution
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    """v1 text encoders need no key changes."""
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
    parser.add_argument(
        '''--use_safetensors''', action='''store_true''', help='''Save weights use safetensors, default is ckpt.'''
    )

    # the scrambled source assigned parse_args() to a throwaway name and then
    # read the undefined `args`; same for every path / state-dict variable below
    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''')
    vae_path = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''')
    text_enc_path = osp.join(args.model_path, '''text_encoder''', '''model.safetensors''')

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device='''cpu''')
    else:
        unet_path = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''')
        unet_state_dict = torch.load(unet_path, map_location='''cpu''')

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device='''cpu''')
    else:
        vae_path = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''')
        vae_state_dict = torch.load(vae_path, map_location='''cpu''')

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device='''cpu''')
    else:
        text_enc_path = osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''')
        text_enc_dict = torch.load(text_enc_path, map_location='''cpu''')

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {'''model.diffusion_model.''' + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {'''first_stage_model.''' + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_vaa_model = '''text_model.encoder.layers.22.layer_norm2.bias''' in text_enc_dict

    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {'''transformer.''' + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_vaa(text_enc_dict)
        text_enc_dict = {'''cond_stage_model.model.''' + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {'''cond_stage_model.transformer.''' + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        # ckpt format nests everything under a 'state_dict' key
        state_dict = {'''state_dict''': state_dict}
        torch.save(state_dict, args.checkpoint_path)
| 54 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import table: module name -> public symbols. The scrambled source named
# this dict `A__` while `_LazyModule` below received the undefined
# `_import_structure`, and the optional sections assigned throwaway lists
# instead of extending the table.
_import_structure = {
    '''configuration_chinese_clip''': [
        '''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''ChineseCLIPConfig''',
        '''ChineseCLIPOnnxConfig''',
        '''ChineseCLIPTextConfig''',
        '''ChineseCLIPVisionConfig''',
    ],
    '''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''feature_extraction_chinese_clip'''] = ['''ChineseCLIPFeatureExtractor''']
    _import_structure['''image_processing_chinese_clip'''] = ['''ChineseCLIPImageProcessor''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_chinese_clip'''] = [
        '''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ChineseCLIPModel''',
        '''ChineseCLIPPreTrainedModel''',
        '''ChineseCLIPTextModel''',
        '''ChineseCLIPVisionModel''',
    ]

if TYPE_CHECKING:
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )

else:
    import sys

    # replace this module with the lazy proxy so attributes import on demand
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 276 | 0 |
'''simple docstring'''
import torch
def __snake_case ( ):
if torch.cuda.is_available():
lowerCamelCase_ = torch.cuda.device_count()
else:
lowerCamelCase_ = 0
print(F'''Successfully ran on {num_gpus} GPUs''' )
if __name__ == "__main__":
main()
| 55 |
'''simple docstring'''
class A__ :
def __init__( self :List[Any] ) -> None:
'''simple docstring'''
_a : dict[str, TrieNode] ={} # Mapping from char to TrieNode
_a : List[str] =False
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :list[str] ) -> None:
'''simple docstring'''
for word in words:
self.insert(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :str ) -> None:
'''simple docstring'''
_a : str =self
for char in word:
if char not in curr.nodes:
_a : Dict =TrieNode()
_a : List[Any] =curr.nodes[char]
_a : int =True
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :str ) -> bool:
'''simple docstring'''
_a : int =self
for char in word:
if char not in curr.nodes:
return False
_a : List[Any] =curr.nodes[char]
return curr.is_leaf
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :str ) -> None:
'''simple docstring'''
def _delete(SCREAMING_SNAKE_CASE :TrieNode , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :int ) -> bool:
if index == len(SCREAMING_SNAKE_CASE ):
# If word does not exist
if not curr.is_leaf:
return False
_a : Any =False
return len(curr.nodes ) == 0
_a : int =word[index]
_a : int =curr.nodes.get(SCREAMING_SNAKE_CASE )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
_a : List[Any] =_delete(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self , SCREAMING_SNAKE_CASE , 0 )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : TrieNode ,_UpperCAmelCase : str ) -> None:
if node.is_leaf:
print(_UpperCAmelCase ,end=""" """ )
for key, value in node.nodes.items():
print_words(_UpperCAmelCase ,word + key )
def SCREAMING_SNAKE_CASE_ ( ) -> bool:
_a : List[str] ="""banana bananas bandana band apple all beast""".split()
_a : List[Any] =TrieNode()
root.insert_many(_UpperCAmelCase )
# print_words(root, "")
assert all(root.find(_UpperCAmelCase ) for word in words )
assert root.find("""banana""" )
assert not root.find("""bandanas""" )
assert not root.find("""apps""" )
assert root.find("""apple""" )
assert root.find("""all""" )
root.delete("""all""" )
assert not root.find("""all""" )
root.delete("""banana""" )
assert not root.find("""banana""" )
assert root.find("""bananas""" )
return True
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : bool ) -> None:
print(str(_UpperCAmelCase ) ,"""works!""" if passes else """doesn't work :(""" )
def SCREAMING_SNAKE_CASE_ ( ) -> None:
assert test_trie()
def SCREAMING_SNAKE_CASE_ ( ) -> None:
print_results("""Testing trie functionality""" ,test_trie() )
if __name__ == "__main__":
main()
| 276 | 0 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
# Test-harness helper for MaskFormer: builds a tiny config plus random pixel
# inputs/labels and checks output shapes of MaskFormerModel /
# MaskFormerForInstanceSegmentation.
# NOTE(review): identifiers in this class are machine-mangled — every method
# is named `A_` (so only the last binding would survive), signatures repeat
# the parameter name `lowercase_` (duplicate parameter names are a
# SyntaxError), and locals are all assigned to `snake_case_` while later
# lines read the intended names (`parent`, `pixel_values`, `config`, ...).
# The comments below document the apparent intent; the code cannot run
# until the names are de-mangled.
class a :
# Record the tester's hyper-parameters: batch size, image min/max size,
# number of queries/channels/labels, and the mask feature size.
def __init__( self : str , lowercase_ : Any , lowercase_ : Dict=2 , lowercase_ : Optional[Any]=True , lowercase_ : Optional[int]=False , lowercase_ : List[Any]=10 , lowercase_ : Dict=3 , lowercase_ : str=32 * 4 , lowercase_ : Dict=32 * 6 , lowercase_ : Union[str, Any]=4 , lowercase_ : str=32 , ):
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = is_training
snake_case_ = use_auxiliary_loss
snake_case_ = num_queries
snake_case_ = num_channels
snake_case_ = min_size
snake_case_ = max_size
snake_case_ = num_labels
snake_case_ = mask_feature_size
# Build random pixel values / pixel mask plus binary mask labels and class
# labels, then return them together with a freshly built config.
def A_ ( self : Optional[Any] ):
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowercase_ )
snake_case_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowercase_ )
snake_case_ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowercase_ ) > 0.5
).float()
snake_case_ = (torch.rand((self.batch_size, self.num_labels) , device=lowercase_ ) > 0.5).long()
snake_case_ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
# Tiny MaskFormer config: shallow Swin backbone + a small DETR decoder.
def A_ ( self : Any ):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
# Common-test entry point: returns (config, inputs_dict).
def A_ ( self : List[Any] ):
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ = self.prepare_config_and_inputs()
snake_case_ = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
# Check that the number of returned hidden-state tuples matches the config.
# NOTE(review): `assertTrue` is given two positional args where `assertEqual`
# was probably intended — assertTrue treats the second as the failure msg.
def A_ ( self : Any , lowercase_ : int , lowercase_ : Tuple ):
snake_case_ = output.encoder_hidden_states
snake_case_ = output.pixel_decoder_hidden_states
snake_case_ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowercase_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowercase_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowercase_ ) , config.decoder_config.decoder_layers )
# Run the bare MaskFormerModel under no_grad and verify the
# transformer-decoder output shape plus the presence of the encoder and
# pixel-decoder last hidden states.
def A_ ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Any , lowercase_ : List[str]=False ):
with torch.no_grad():
snake_case_ = MaskFormerModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ = model(pixel_values=lowercase_ , pixel_mask=lowercase_ )
snake_case_ = model(lowercase_ , output_hidden_states=lowercase_ )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowercase_ , lowercase_ )
# Run MaskFormerForInstanceSegmentation with and without labels; check the
# mask/class logits shapes and, when labels are given, the scalar loss.
def A_ ( self : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : Tuple , lowercase_ : Union[str, Any] ):
snake_case_ = MaskFormerForInstanceSegmentation(config=lowercase_ )
model.to(lowercase_ )
model.eval()
def comm_check_on_output(lowercase_ : Union[str, Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
snake_case_ = model(pixel_values=lowercase_ , pixel_mask=lowercase_ )
snake_case_ = model(lowercase_ )
comm_check_on_output(lowercase_ )
snake_case_ = model(
pixel_values=lowercase_ , pixel_mask=lowercase_ , mask_labels=lowercase_ , class_labels=lowercase_ )
comm_check_on_output(lowercase_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
# Common MaskFormer model tests (shape checks, signature checks, training /
# retain_grad smoke tests).
# NOTE(review): the base classes are both mangled to `_lowerCamelCase`
# (apparently ModelTesterMixin and PipelineTesterMixin), every class
# attribute is assigned to the single name `snake_case_`, every method is
# named `A_` (so only the last binding would survive), and tuple unpacks
# like `snake_case_ ,snake_case_ = ...` discard all but the last element
# while later lines read the intended names. The comments document the
# apparent intent; the code cannot run until the names are de-mangled.
@require_torch
class a ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
# Apparent originals: all_model_classes, pipeline_model_mapping, then four
# boolean test-feature switches (pruning/head-masking/etc.) set to False.
snake_case_ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
snake_case_ = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
# setUp: build the model tester and a ConfigTester (no text modality).
def A_ ( self : Union[str, Any] ):
snake_case_ = MaskFormerModelTester(self )
snake_case_ = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )
def A_ ( self : Any ):
self.config_tester.run_common_tests()
# Base-model forward shape check (hidden states off).
def A_ ( self : Dict ):
snake_case_ ,snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowercase_ , **lowercase_ , output_hidden_states=lowercase_ )
# Instance-segmentation head shape/loss check.
def A_ ( self : Tuple ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*lowercase_ )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def A_ ( self : List[Any] ):
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def A_ ( self : str ):
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def A_ ( self : Tuple ):
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def A_ ( self : Optional[Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def A_ ( self : List[str] ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def A_ ( self : Union[str, Any] ):
pass
# forward() must accept `pixel_values` as its first argument.
def A_ ( self : str ):
snake_case_ ,snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(lowercase_ )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase_ )
# Slow: download a pretrained checkpoint and confirm it loads.
@slow
def A_ ( self : List[str] ):
for model_name in ["facebook/maskformer-swin-small-coco"]:
snake_case_ = MaskFormerModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
# A labelled forward pass must produce a loss.
def A_ ( self : Optional[int] ):
snake_case_ = (self.model_tester.min_size,) * 2
snake_case_ = {
'''pixel_values''': torch.randn((2, 3, *size) , device=lowercase_ ),
'''mask_labels''': torch.randn((2, 10, *size) , device=lowercase_ ),
'''class_labels''': torch.zeros(2 , 10 , device=lowercase_ ).long(),
}
snake_case_ = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(lowercase_ )
snake_case_ = model(**lowercase_ )
self.assertTrue(outputs.loss is not None )
# Shape check with output_hidden_states on.
def A_ ( self : List[str] ):
snake_case_ ,snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowercase_ , **lowercase_ , output_hidden_states=lowercase_ )
# Attention outputs must be returned when requested.
def A_ ( self : str ):
snake_case_ ,snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(lowercase_ ).to(lowercase_ )
snake_case_ = model(**lowercase_ , output_attentions=lowercase_ )
self.assertTrue(outputs.attentions is not None )
# Training smoke test: a labelled forward + backward must succeed.
def A_ ( self : Dict ):
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
snake_case_ = self.all_model_classes[1]
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ = self.model_tester.prepare_config_and_inputs()
snake_case_ = model_class(lowercase_ )
model.to(lowercase_ )
model.train()
snake_case_ = model(lowercase_ , mask_labels=lowercase_ , class_labels=lowercase_ ).loss
loss.backward()
# retain_grad test: gradients must flow to every intermediate output.
def A_ ( self : List[str] ):
# only MaskFormerForInstanceSegmentation has the loss
snake_case_ = self.all_model_classes[1]
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ = self.model_tester.prepare_config_and_inputs()
snake_case_ = True
snake_case_ = True
snake_case_ = model_class(lowercase_ )
model.to(lowercase_ )
model.train()
snake_case_ = model(lowercase_ , mask_labels=lowercase_ , class_labels=lowercase_ )
snake_case_ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
snake_case_ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
snake_case_ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
snake_case_ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowercase_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
# Absolute tolerance used by the slow integration tests below when comparing
# model outputs against hard-coded reference tensors via torch.allclose.
a : float = 1E-4
def __magic_name__():
    """Load and return the standard COCO test fixture image.

    The original assigned the opened image to a throwaway local but
    returned the undefined name `image` (NameError); the opened image is
    now returned directly. The wrong `Optional[int]` return annotation was
    dropped — the function returns a PIL image.
    """
    img = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return img
# Slow integration tests: run pretrained MaskFormer checkpoints on a fixture
# image and compare output slices against hard-coded reference tensors.
# NOTE(review): identifiers are machine-mangled — every method is named `A_`
# (only the last binding would survive), every local is assigned to
# `snake_case_` while later lines read the intended names (`model`,
# `inputs`, `inputs_shape`, `outputs`, ...), and most call arguments were
# replaced with `lowercase_`. The comments document the apparent intent;
# the code cannot run until the names are de-mangled.
@require_vision
@slow
class a ( unittest.TestCase ):
# Shared image processor for the swin-small COCO checkpoint (None when
# vision deps are unavailable).
@cached_property
def A_ ( self : Tuple ):
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
# Bare MaskFormerModel: check preprocessing size and three hidden-state
# slices against reference values.
def A_ ( self : Optional[int] ):
snake_case_ = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(lowercase_ )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(lowercase_ , return_tensors='''pt''' ).to(lowercase_ )
snake_case_ = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowercase_ , (1, 3, 800, 1088) )
with torch.no_grad():
snake_case_ = model(**lowercase_ )
snake_case_ = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(lowercase_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowercase_ , atol=lowercase_ ) )
snake_case_ = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(lowercase_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowercase_ , atol=lowercase_ ) )
snake_case_ = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(lowercase_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowercase_ , atol=lowercase_ ) )
# Instance-segmentation head (swin-small): check mask and class logits
# shapes and reference slices.
def A_ ( self : List[str] ):
snake_case_ = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(lowercase_ )
.eval()
)
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(lowercase_ , return_tensors='''pt''' ).to(lowercase_ )
snake_case_ = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowercase_ , (1, 3, 800, 1088) )
with torch.no_grad():
snake_case_ = model(**lowercase_ )
# masks_queries_logits
snake_case_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
snake_case_ = [
[-1.373_7124, -1.772_4937, -1.936_4233],
[-1.597_7281, -1.986_7939, -2.152_3695],
[-1.579_5398, -1.926_9832, -2.09_3942],
]
snake_case_ = torch.tensor(lowercase_ ).to(lowercase_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowercase_ , atol=lowercase_ ) )
# class_queries_logits
snake_case_ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
snake_case_ = torch.tensor(
[
[1.6_512e00, -5.2_572e00, -3.3_519e00],
[3.6_169e-02, -5.9_025e00, -2.9_313e00],
[1.0_766e-04, -7.7_630e00, -5.1_263e00],
] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowercase_ , atol=lowercase_ ) )
# Instance-segmentation head (resnet101 coco-stuff): same checks with
# different reference values.
def A_ ( self : Any ):
snake_case_ = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(lowercase_ )
.eval()
)
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(lowercase_ , return_tensors='''pt''' ).to(lowercase_ )
snake_case_ = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowercase_ , (1, 3, 800, 1088) )
with torch.no_grad():
snake_case_ = model(**lowercase_ )
# masks_queries_logits
snake_case_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
snake_case_ = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
snake_case_ = torch.tensor(lowercase_ ).to(lowercase_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowercase_ , atol=lowercase_ ) )
# class_queries_logits
snake_case_ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
snake_case_ = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowercase_ , atol=lowercase_ ) )
# Forward pass with segmentation maps (mask/class labels from the image
# processor) must produce a loss.
def A_ ( self : Dict ):
snake_case_ = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(lowercase_ )
.eval()
)
snake_case_ = self.default_image_processor
snake_case_ = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , )
snake_case_ = inputs['''pixel_values'''].to(lowercase_ )
snake_case_ = [el.to(lowercase_ ) for el in inputs['''mask_labels''']]
snake_case_ = [el.to(lowercase_ ) for el in inputs['''class_labels''']]
with torch.no_grad():
snake_case_ = model(**lowercase_ )
self.assertTrue(outputs.loss is not None )
| 56 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy import structure for the GPT-Sw3 tokenizer: the tokenizer submodule
# is only registered when sentencepiece is installed.
# Fixes over the original: the structure dict was being clobbered by a bare
# list, the final _LazyModule call referenced an undefined name
# `_import_structure`, and the TYPE_CHECKING import used a misspelled
# module/class (`tokenization_gpt_swa` / `GPTSwaTokenizer`).
A__: dict = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Map submodule name -> the public symbols it provides.
    A__["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], A__, module_spec=__spec__)
| 276 | 0 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
A : Any = logging.get_logger(__name__)
# Depth-estimation pipeline: load an image, run the depth model, and return
# the predicted depth tensor plus a uint8 PIL depth map.
# NOTE(review): identifiers are machine-mangled — the decorator argument and
# base class are both `lowerCAmelCase__`, the four stage methods are all
# named `snake_case` (only the last binding survives; apparent originals:
# _sanitize_parameters, preprocess, _forward, postprocess), `__init__`
# declares `*__a , **__a` with the same name (a SyntaxError), and locals are
# assigned to `__lowerCAmelCase` while later lines read the intended names.
@add_end_docstrings(lowerCAmelCase__ )
class _UpperCamelCase ( lowerCAmelCase__ ):
"""Pipeline that predicts per-pixel depth for an input image."""
# Require the vision backend and restrict to depth-estimation models.
def __init__( self , *__a , **__a ):
super().__init__(*__a , **__a )
requires_backends(self , "vision" )
self.check_model_type(__a )
# Accepts an image (or list of images) and delegates to the base Pipeline.
def __call__( self , __a , **__a ):
return super().__call__(__a , **__a )
# No extra parameters: empty kwargs for preprocess/forward/postprocess.
def snake_case ( self , **__a ):
return {}, {}, {}
# Preprocess: load the image and run the image processor.
# NOTE(review): `image.size` is assigned to a throwaway local here, but
# postprocess reads `self.image_size` — the original presumably stored it
# on `self`; TODO confirm against the upstream pipeline.
def snake_case ( self , __a ):
__lowerCAmelCase = load_image(__a )
__lowerCAmelCase = image.size
__lowerCAmelCase = self.image_processor(images=__a , return_tensors=self.framework )
return model_inputs
# Forward: plain model call on the processed inputs.
def snake_case ( self , __a ):
__lowerCAmelCase = self.model(**__a )
return model_outputs
# Postprocess: upsample the predicted depth back to the input size,
# normalize to 0-255 and wrap it in a PIL image.
def snake_case ( self , __a ):
__lowerCAmelCase = model_outputs.predicted_depth
__lowerCAmelCase = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="bicubic" , align_corners=__a )
__lowerCAmelCase = prediction.squeeze().cpu().numpy()
__lowerCAmelCase = (output * 2_55 / np.max(__a )).astype("uint8" )
__lowerCAmelCase = Image.fromarray(__a )
__lowerCAmelCase = {}
__lowerCAmelCase = predicted_depth
__lowerCAmelCase = depth
return output_dict
| 57 |
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
A__: str = logging.get_logger(__name__)
# Set a (possibly quantized) tensor onto a module, mirroring accelerate's
# set_module_tensor_to_device but with bitsandbytes Int8/4-bit parameter
# support (re-wrapping values as Int8Params / Params4bit, transposing Conv1D
# weights before quantization, attaching fp16 SCB statistics).
# NOTE(review): all five parameters are declared with the same name
# `_UpperCAmelCase` (duplicate parameter names are a SyntaxError) and every
# local is assigned to `_a` while later lines read the intended names
# (`splits`, `new_module`, `old_value`, `param`, `new_value`, ...).
# Apparent original signature:
#   (module, tensor_name, device, value=None, fp16_statistics=None).
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : int ,_UpperCAmelCase : int=None ,_UpperCAmelCase : Optional[Any]=None ) -> Optional[Any]:
# Recurse if needed
# Walk dotted tensor names ("encoder.layer.0.weight") down to the owner.
if "." in tensor_name:
_a : Union[str, Any] =tensor_name.split(""".""" )
for split in splits[:-1]:
_a : Optional[Any] =getattr(_UpperCAmelCase ,_UpperCAmelCase )
if new_module is None:
raise ValueError(F"{module} has no attribute {split}." )
_a : Optional[int] =new_module
_a : Optional[int] =splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F"{module} does not have a parameter or a buffer named {tensor_name}." )
_a : Optional[Any] =tensor_name in module._buffers
_a : str =getattr(_UpperCAmelCase ,_UpperCAmelCase )
# A meta-device tensor has no data, so moving it off-meta needs a `value`.
if old_value.device == torch.device("""meta""" ) and device not in ["meta", torch.device("""meta""" )] and value is None:
raise ValueError(F"{tensor_name} is on the meta device, we need a `value` to put in on {device}." )
_a : int =False
_a : Tuple =False
# Buffers are never quantized; without bitsandbytes nothing is either.
if is_buffer or not is_bitsandbytes_available():
_a : str =False
_a : Optional[Any] =False
else:
_a : int =hasattr(bnb.nn ,"""Params4bit""" ) and isinstance(module._parameters[tensor_name] ,bnb.nn.Paramsabit )
_a : int =isinstance(module._parameters[tensor_name] ,bnb.nn.IntaParams )
# Quantized path: re-wrap the value as a bitsandbytes parameter class.
if is_abit or is_abit:
_a : Any =module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
_a : int =old_value.to(_UpperCAmelCase )
elif isinstance(_UpperCAmelCase ,torch.Tensor ):
_a : str =value.to("""cpu""" )
# int8 checkpoints can only be loaded with bitsandbytes > 0.37.2.
if value.dtype == torch.inta:
_a : int =version.parse(importlib.metadata.version("""bitsandbytes""" ) ) > version.parse(
"""0.37.2""" )
if not is_abit_serializable:
raise ValueError(
"""Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. """
"""Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.""" )
else:
_a : Dict =torch.tensor(_UpperCAmelCase ,device="""cpu""" )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls ,_UpperCAmelCase ) and fpaa_statistics is None:
_a : int =new_value.T
_a : Any =old_value.__dict__
if is_abit:
_a : Any =bnb.nn.IntaParams(_UpperCAmelCase ,requires_grad=_UpperCAmelCase ,**_UpperCAmelCase ).to(_UpperCAmelCase )
elif is_abit:
_a : Union[str, Any] =bnb.nn.Paramsabit(_UpperCAmelCase ,requires_grad=_UpperCAmelCase ,**_UpperCAmelCase ).to(_UpperCAmelCase )
_a : List[Any] =new_value
# Attach the pre-computed int8 statistics (SCB) when provided.
if fpaa_statistics is not None:
setattr(module.weight ,"""SCB""" ,fpaa_statistics.to(_UpperCAmelCase ) )
else:
# Plain (non-quantized) path: move/copy the tensor to the target device.
if value is None:
_a : str =old_value.to(_UpperCAmelCase )
elif isinstance(_UpperCAmelCase ,torch.Tensor ):
_a : Any =value.to(_UpperCAmelCase )
else:
_a : str =torch.tensor(_UpperCAmelCase ,device=_UpperCAmelCase )
if is_buffer:
_a : Optional[int] =new_value
else:
_a : Optional[Any] =nn.Parameter(_UpperCAmelCase ,requires_grad=old_value.requires_grad )
_a : Tuple =new_value
# Recursively replace nn.Linear / Conv1D children with bitsandbytes
# quantized linears (8-bit LinearLt for llm_int8, 4-bit Linear otherwise),
# skipping any module whose dotted name matches `modules_to_not_convert`.
# Returns (model, has_been_replaced).
# NOTE(review): five parameters share the name `_UpperCAmelCase` (duplicate
# parameter names are a SyntaxError) and locals are assigned to `_a` while
# later lines read the intended names. Apparent original signature:
#   (model, modules_to_not_convert=None, current_key_name=None,
#    quantization_config=None, has_been_replaced=False).
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Tuple ,_UpperCAmelCase : Union[str, Any]=None ,_UpperCAmelCase : List[Any]=None ,_UpperCAmelCase : str=None ,_UpperCAmelCase : Union[str, Any]=False ) -> Dict:
for name, module in model.named_children():
if current_key_name is None:
_a : Optional[int] =[]
# Track the dotted path to the current child for skip-list matching.
current_key_name.append(_UpperCAmelCase )
if (isinstance(_UpperCAmelCase ,nn.Linear ) or isinstance(_UpperCAmelCase ,_UpperCAmelCase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in """.""".join(_UpperCAmelCase ) for key in modules_to_not_convert ):
# Build the replacement layer without allocating real weights.
with init_empty_weights():
# Conv1D stores (in, out) in its weight shape; Linear exposes
# in_features/out_features directly.
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
_a , _a : int =module.weight.shape
else:
_a : List[str] =module.in_features
_a : Tuple =module.out_features
if quantization_config.quantization_method() == "llm_int8":
_a : Optional[Any] =bnb.nn.LinearabitLt(
_UpperCAmelCase ,_UpperCAmelCase ,module.bias is not None ,has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight ,threshold=quantization_config.llm_inta_threshold ,)
_a : Optional[Any] =True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
_a : Dict =bnb.nn.Linearabit(
_UpperCAmelCase ,_UpperCAmelCase ,module.bias is not None ,quantization_config.bnb_abit_compute_dtype ,compress_statistics=quantization_config.bnb_abit_use_double_quant ,quant_type=quantization_config.bnb_abit_quant_type ,)
_a : List[Any] =True
# Store the module class in case we need to transpose the weight later
_a : int =type(_UpperCAmelCase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(_UpperCAmelCase )
# Recurse into grandchildren.
if len(list(module.children() ) ) > 0:
_a , _a : List[Any] =_replace_with_bnb_linear(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,has_been_replaced=_UpperCAmelCase ,)
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
# Public wrapper around the recursive replacement: defaults the skip list to
# ["lm_head"], runs the conversion, and warns when nothing was converted.
# NOTE(review): four parameters share the name `_UpperCAmelCase` (duplicate
# parameter names are a SyntaxError). Apparent original signature:
#   (model, modules_to_not_convert=None, current_key_name=None,
#    quantization_config=None).
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Tuple ,_UpperCAmelCase : int=None ,_UpperCAmelCase : Union[str, Any]=None ,_UpperCAmelCase : Any=None ) -> Tuple:
_a : Dict =["""lm_head"""] if modules_to_not_convert is None else modules_to_not_convert
_a , _a : List[Any] =_replace_with_bnb_linear(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def SCREAMING_SNAKE_CASE_(*args, **kwargs):
    """Deprecated alias for ``replace_with_bnb_linear``.

    Emits a FutureWarning and forwards every argument unchanged. Fixes over
    the original: ``*``/``**`` parameters shared one name (a SyntaxError)
    and ``warnings.warn`` was given the args tuple where a warning category
    belongs.
    """
    warnings.warn(
        """`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead""",
        FutureWarning,
    )
    # NOTE(review): in this obfuscated file the target is defined under a
    # mangled name — confirm `replace_with_bnb_linear` resolves at runtime.
    return replace_with_bnb_linear(*args, **kwargs)
def SCREAMING_SNAKE_CASE_(*args, **kwargs):
    """Deprecated alias for ``set_module_quantized_tensor_to_device``.

    Emits a FutureWarning and forwards every argument unchanged. Fixes over
    the original: ``*``/``**`` parameters shared one name (a SyntaxError)
    and ``warnings.warn`` was given the args tuple where a warning category
    belongs.
    """
    warnings.warn(
        """`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead""",
        FutureWarning,
    )
    # NOTE(review): in this obfuscated file the target is defined under a
    # mangled name — confirm `set_module_quantized_tensor_to_device` resolves.
    return set_module_quantized_tensor_to_device(*args, **kwargs)
# Compute the module names that should stay in full precision when
# quantizing: the model's output head (last named child) plus any modules
# whose parameters are tied to it, with ".weight"/".bias" suffixes removed.
# NOTE(review): locals are assigned to `_a` while later lines read the
# intended names (`tied_model`, `tied_params`, `tied_keys`, `list_modules`,
# `list_last_module`, `intersection`, `list_untouched`, ...); the parameter
# is the model (annotated `int` incorrectly).
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> Union[str, Any]:
_a : Any =deepcopy(_UpperCAmelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
_a : List[Any] =find_tied_parameters(_UpperCAmelCase )
# For compatibility with Accelerate < 0.18
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
_a : str =sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() )
else:
_a : Optional[int] =sum(_UpperCAmelCase ,[] )
_a : List[Any] =len(_UpperCAmelCase ) > 0
# Check if it is a base model
_a : Tuple =not hasattr(_UpperCAmelCase ,model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
_a : List[Any] =list(model.named_children() )
_a : Dict =[list_modules[-1][0]]
# add last module together with tied weights
_a : List[str] =set(_UpperCAmelCase ) - set(_UpperCAmelCase )
_a : str =list(set(_UpperCAmelCase ) ) + list(_UpperCAmelCase )
# remove ".weight" from the keys
_a : List[Any] =[""".weight""", """.bias"""]
_a : Any =[]
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
_a : Any =name.replace(_UpperCAmelCase ,"""""" )
filtered_module_names.append(_UpperCAmelCase )
return filtered_module_names
| 276 | 0 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class a_ :
'''simple docstring'''
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> int:
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_input_mask
_SCREAMING_SNAKE_CASE = use_token_type_ids
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = num_choices
_SCREAMING_SNAKE_CASE = scope
def snake_case_( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case_( self ) -> Any:
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , )
def snake_case_( self , A , A , A , A , A , A , A ) -> str:
_SCREAMING_SNAKE_CASE = NystromformerModel(config=A )
model.to(A )
model.eval()
_SCREAMING_SNAKE_CASE = model(A , attention_mask=A , token_type_ids=A )
_SCREAMING_SNAKE_CASE = model(A , token_type_ids=A )
_SCREAMING_SNAKE_CASE = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_( self , A , A , A , A , A , A , A ) -> str:
_SCREAMING_SNAKE_CASE = NystromformerForMaskedLM(config=A )
model.to(A )
model.eval()
_SCREAMING_SNAKE_CASE = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_( self , A , A , A , A , A , A , A ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = NystromformerForQuestionAnswering(config=A )
model.to(A )
model.eval()
_SCREAMING_SNAKE_CASE = model(
A , attention_mask=A , token_type_ids=A , start_positions=A , end_positions=A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case_( self , A , A , A , A , A , A , A ) -> Any:
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = NystromformerForSequenceClassification(A )
model.to(A )
model.eval()
_SCREAMING_SNAKE_CASE = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_( self , A , A , A , A , A , A , A ) -> str:
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = NystromformerForTokenClassification(config=A )
model.to(A )
model.eval()
_SCREAMING_SNAKE_CASE = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case_( self , A , A , A , A , A , A , A ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = self.num_choices
_SCREAMING_SNAKE_CASE = NystromformerForMultipleChoice(config=A )
model.to(A )
model.eval()
_SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE = model(
A , attention_mask=A , token_type_ids=A , labels=A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def prepare_config_and_inputs_for_common(self):
    """Return (config, inputs_dict) for the shared ModelTesterMixin tests.

    NOTE(review): the original unpacked the 7-tuple into seven throwaway names and then
    referenced `input_ids`/`token_type_ids`/`input_mask`, which were never bound.
    """
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = config_and_inputs
    inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
    return config, inputs_dict
@require_torch
class a_(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Nystromformer model-test suite.

    NOTE(review): the original declared the same base class twice (a TypeError), repeated one
    class-attribute name for all four attributes, and named every test method identically so
    only the last survived. Restored the conventional transformers test-suite names; the mixin
    base names are presumed from the standard test imports — confirm against the file header.
    """

    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the two boolean flags were unnamed in the corrupted source; these are the
    # flags the upstream Nystromformer test suite disables — confirm.
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            # config is the first element of the tuple; vary its position-embedding flavour.
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class a_(unittest.TestCase):
    """Slow integration tests against the published uw-madison/nystromformer-512 checkpoint."""

    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"
        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
        encoding = tokenizer(sentence, return_tensors="pt")
        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits
        # Position 2 is the [MASK] token in the encoded sentence.
        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction), "capital")
| 58 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
A__: int = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification.

    Field names restored from their uses later in the file
    (`example.guid`, `example.words`, `example.labels`).
    """

    guid: str  # unique identifier for the example
    words: List[str]  # whitespace-split words of the sentence
    labels: Optional[List[str]]  # per-word labels; None for test data
@dataclass
class InputFeatures:
    """A single set of tensor-ready features.

    Field names restored from the keyword construction later in the file
    (`InputFeatures(input_ids=..., attention_mask=..., token_type_ids=..., label_ids=...)`).
    """

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None
class Split(Enum):
    """Dataset split selector; `.value` is used to name the on-disk feature cache.

    Class name restored from the `Split.train` default used below; base restored to
    `Enum` (imported at the top of the file).
    """

    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    """Base class for token-classification tasks (NER, POS, chunking).

    Subclasses implement `read_examples_from_file` and `get_labels`; the shared
    `convert_examples_to_features` turns word/label pairs into padded id features.
    Method names restored from the call sites in the Dataset wrappers below.
    """

    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        """Tokenize each example, attach label ids, add special tokens and pad to max_seq_length."""
        label_map = {label: i for i, label in enumerate(label_list)}
        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))
            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)
                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # "type_ids" indicate first vs second sequence; the [CLS] vector is used as
            # the sentence vector for classification tasks.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)
            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length
            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None
            features.append(
                InputFeatures(
                    input_ids=input_ids,
                    attention_mask=input_mask,
                    token_type_ids=segment_ids,
                    label_ids=label_ids,
                )
            )
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class TokenClassificationDataset(Dataset):
    """Torch Dataset of token-classification features with on-disk caching.

    NOTE(review): the original class name was garbled (`A__`, clashing with the TF wrapper
    below); renamed to the conventional name — confirm against external importers.
    """

    features: List[InputFeatures]
    # Padded label positions use CrossEntropyLoss's ignore_index so they don't contribute to the loss.
    pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

    def __init__(
        self,
        token_classification_task: TokenClassificationTask,
        data_dir: str,
        tokenizer: PreTrainedTokenizer,
        labels: List[str],
        model_type: str,
        max_seq_length: Optional[int] = None,
        overwrite_cache=False,
        mode: Split = Split.train,
    ):
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            data_dir,
            "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
        )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                logger.info(f"Loading features from cached file {cached_features_file}")
                self.features = torch.load(cached_features_file)
            else:
                logger.info(f"Creating features from dataset file at {data_dir}")
                examples = token_classification_task.read_examples_from_file(data_dir, mode)
                # TODO clean up all this to leverage built-in features of tokenizers
                self.features = token_classification_task.convert_examples_to_features(
                    examples,
                    labels,
                    max_seq_length,
                    tokenizer,
                    cls_token_at_end=bool(model_type in ["xlnet"]),
                    cls_token=tokenizer.cls_token,
                    cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                    sep_token=tokenizer.sep_token,
                    # NOTE(review): this flag was garbled in the source; upstream passes False here — confirm.
                    sep_token_extra=False,
                    pad_on_left=bool(tokenizer.padding_side == "left"),
                    pad_token=tokenizer.pad_token_id,
                    pad_token_segment_id=tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info(f"Saving features into cached file {cached_features_file}")
                torch.save(self.features, cached_features_file)

    def __len__(self) -> int:
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]
if is_tf_available():
import tensorflow as tf
class TFTokenClassificationDataset:
    """TensorFlow counterpart: wraps features in a tf.data.Dataset built from a generator.

    NOTE(review): the original dtypes were garbled to `tf.intaa`; restored to the upstream
    convention of int32 inputs / int64 labels — confirm against the tf.data docs.
    """

    features: List[InputFeatures]
    pad_token_label_id: int = -100

    def __init__(
        self,
        token_classification_task: TokenClassificationTask,
        data_dir: str,
        tokenizer: PreTrainedTokenizer,
        labels: List[str],
        model_type: str,
        max_seq_length: Optional[int] = None,
        overwrite_cache=False,
        mode: Split = Split.train,
    ):
        examples = token_classification_task.read_examples_from_file(data_dir, mode)
        # TODO clean up all this to leverage built-in features of tokenizers
        self.features = token_classification_task.convert_examples_to_features(
            examples,
            labels,
            max_seq_length,
            tokenizer,
            cls_token_at_end=bool(model_type in ["xlnet"]),
            cls_token=tokenizer.cls_token,
            cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
            sep_token=tokenizer.sep_token,
            # NOTE(review): flag garbled in the source; upstream passes False here — confirm.
            sep_token_extra=False,
            pad_on_left=bool(tokenizer.padding_side == "left"),
            pad_token=tokenizer.pad_token_id,
            pad_token_segment_id=tokenizer.pad_token_type_id,
            pad_token_label_id=self.pad_token_label_id,
        )

        def gen():
            for ex in self.features:
                if ex.token_type_ids is None:
                    yield (
                        {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                        ex.label_ids,
                    )
                else:
                    yield (
                        {
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label_ids,
                    )

        if "token_type_ids" not in tokenizer.model_input_names:
            self.dataset = tf.data.Dataset.from_generator(
                gen,
                ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                (
                    {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                    tf.TensorShape([None]),
                ),
            )
        else:
            self.dataset = tf.data.Dataset.from_generator(
                gen,
                ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                (
                    {
                        "input_ids": tf.TensorShape([None]),
                        "attention_mask": tf.TensorShape([None]),
                        "token_type_ids": tf.TensorShape([None]),
                    },
                    tf.TensorShape([None]),
                ),
            )

    def get_dataset(self):
        """Attach the known cardinality and return the tf.data.Dataset."""
        self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
        return self.dataset

    def __len__(self) -> int:
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]
| 276 | 0 |
from __future__ import annotations
from typing import Any
class UpperCAmelCase:
    """Boruvka's algorithm for the minimum spanning tree of a connected weighted graph.

    NOTE(review): the original bound every assignment to a throwaway local and gave all five
    methods the same name, so no instance attribute was ever set; restored from the attribute
    and method references inside the class.
    """

    def __init__(self, num_of_nodes: int) -> None:
        """Create an empty graph over `num_of_nodes` nodes (labelled 0..n-1)."""
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        # Maps each node to its current component representative.
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Record an undirected edge (u, v) with the given weight."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Follow parent pointers to the representative of u_node's component."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Recompute every node's representative after u_node was re-parented."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Merge the components rooted at u_node and v_node (smaller under larger)."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Compute the MST, printing each added edge and the total weight."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge = [-1] * self.m_num_of_nodes
        # Every node starts as its own component of size 1.
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            # For every component, find the cheapest edge leaving it.
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            # Add each component's cheapest edge to the MST (if it still crosses components).
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"""The total weight of the minimal spanning tree is: {mst_weight}""")
def UpperCamelCase ( ):
    """Intentionally empty placeholder; the module's only entry point besides doctest."""
    pass
if __name__ == "__main__":
    import doctest

    # Run any doctests embedded in this module's docstrings.
    doctest.testmod()
| 59 |
'''simple docstring'''
from __future__ import annotations
class BoyerMooreSearch:
    """Boyer-Moore substring search using only the bad-character heuristic.

    Class name restored from the instantiation `BoyerMooreSearch(text, pattern)` below;
    method names restored from the internal calls. NOTE(review): the original never
    assigned the instance attributes it read.
    """

    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch for the window at
        `current_pos`, or -1 when the whole pattern matches there."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]; NOTE: rebinding a for-loop variable has no effect in Python
        return positions
# Demo: report every alignment at which the pattern occurs in the text.
# NOTE(review): the original bound all four values to the name `A__`, leaving
# `text`/`pattern`/`bms`/`positions` undefined at their use sites; restored.
text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
| 276 | 0 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
    """Builds tiny LayoutLMv3 configs/inputs and checks TF model output shapes.

    Class name restored from the `TFLayoutLMvaModelTester(self)` call in the test
    suite below; locals restored from the references in each method body.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal: each box must satisfy x0 <= x1 and y0 <= y1.
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMvaModel(config=config)
        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )
        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class snake_case_(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """LayoutLMv3 TF model-test suite.

    NOTE(review): the original repeated the (undefined) base name `a__` twice and used one
    attribute name for all five class attributes; bases restored from the mixin imports at
    the top of the file, attribute names from the standard transformers test conventions.
    """

    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    # NOTE(review): the three boolean flags were unnamed in the corrupted source — confirm.
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
def is_pipeline_test_to_skip(
    self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
):
    """Skip every pipeline test for this model (PipelineTesterMixin hook).

    NOTE(review): name restored per the mixin's lookup convention — confirm;
    the original gave all five parameters the same name (a SyntaxError).
    """
    return True
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
    """Adapt the common inputs_dict for a specific model class, optionally adding dummy labels.

    NOTE(review): the garbled dtype `tf.intaa` restored to tf.int32 per the upstream
    TF test utilities — confirm.
    """
    inputs_dict = copy.deepcopy(inputs_dict)
    if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
        # Tile every non-scalar tensor along a new num_choices axis.
        inputs_dict = {
            k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
            if isinstance(v, tf.Tensor) and v.ndim > 0
            else v
            for k, v in inputs_dict.items()
        }
    if return_labels:
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
        elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
            inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
            inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
            inputs_dict["labels"] = tf.zeros(
                (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
            )
    return inputs_dict
def setUp(self):
    """Create the model tester and config tester used by the shared tests.

    NOTE(review): restored the `setUp` name so unittest actually runs this, and the
    garbled `config_class=UpperCamelCase_` to the imported LayoutLMvaConfig.
    """
    self.model_tester = TFLayoutLMvaModelTester(self)
    self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
def lowerCamelCase__ ( self : Any ):
self.config_tester.run_common_tests()
def test_loss_computation(self):
    """Check every model class computes a loss of the expected shape for each input format.

    NOTE(review): local names and the undefined ``UpperCamelCase_`` references
    were reconstructed from the standard TF common-test implementation —
    confirm against `test_modeling_tf_common.py`.
    """
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        model = model_class(config)
        if getattr(model, "hf_compute_loss", None):
            # The number of elements in the loss should be the same as the number of elements in the label.
            prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
            added_label = prepared_for_class[
                sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
            ]
            expected_loss_size = added_label.shape.as_list()[:1]

            # Test that model correctly computes the loss with kwargs.
            prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
            input_ids = prepared_for_class.pop("input_ids")
            loss = model(input_ids, **prepared_for_class)[0]
            self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

            # Test that model correctly computes the loss when some positions are masked with -100.
            prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
            input_ids = prepared_for_class.pop("input_ids")
            if "labels" in prepared_for_class:
                labels = prepared_for_class["labels"].numpy()
                if len(labels.shape) > 1 and labels.shape[1] != 1:
                    labels[0] = -100
                    prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                    loss = model(input_ids, **prepared_for_class)[0]
                    self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                    self.assertTrue(not np.any(np.isnan(loss.numpy())))

            # Test that model correctly computes the loss with a dict.
            prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
            loss = model(prepared_for_class)[0]
            self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

            # Test that model correctly computes the loss with a tuple.
            prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
            # Get keys that were added with the _prepare_for_class function.
            label_keys = prepared_for_class.keys() - inputs_dict.keys()
            signature = inspect.signature(model.call).parameters
            signature_names = list(signature.keys())
            # Create a dictionary holding the location of the tensors in the tuple.
            tuple_index_mapping = {0: "input_ids"}
            for label_key in label_keys:
                label_key_index = signature_names.index(label_key)
                tuple_index_mapping[label_key_index] = label_key
            sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
            # Initialize a list with default values, update it, and convert to a tuple.
            list_input = []
            for name in signature_names:
                if name != "kwargs":
                    list_input.append(signature[name].default)
            for index, value in sorted_tuple_index_mapping:
                list_input[index] = prepared_for_class[value]
            tuple_input = tuple(list_input)
            # Send to model (drop the trailing slot).
            loss = model(tuple_input[:-1])[0]
            self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
def test_model(self):
    """Smoke-test the base model on the tester's standard inputs.

    NOTE(review): the original 8-way unpack bound every value to the same
    mangled name and then passed an undefined name; the unpacked names are
    reconstructed — confirm against TFLayoutLMvaModelTester.prepare_config_and_inputs.
    """
    (
        config,
        input_ids,
        bbox,
        pixel_values,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
    ) = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
def test_model_various_embeddings(self):
    """Run the base-model check for each supported position-embedding type."""
    (
        config,
        input_ids,
        bbox,
        pixel_values,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
    ) = self.model_tester.prepare_config_and_inputs()
    for embedding_type in ["absolute", "relative_key", "relative_key_query"]:
        config.position_embedding_type = embedding_type
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
def test_for_sequence_classification(self):
    """Exercise the sequence-classification head via the shared tester."""
    (
        config,
        input_ids,
        bbox,
        pixel_values,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
    ) = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_sequence_classification(
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    )
def test_for_token_classification(self):
    """Exercise the token-classification head via the shared tester."""
    (
        config,
        input_ids,
        bbox,
        pixel_values,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
    ) = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_token_classification(
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    )
def test_for_question_answering(self):
    """Exercise the question-answering head via the shared tester."""
    (
        config,
        input_ids,
        bbox,
        pixel_values,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
    ) = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_question_answering(
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    )
@slow
def test_model_from_pretrained(self):
    """Load the first public checkpoint and make sure it instantiates."""
    for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        model = TFLayoutLMvaModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO fixture image used by the integration test.

    Restored the name to ``prepare_img`` to match the existing call site
    (``image = prepare_img()`` in the integration test below).
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_tf
class snake_case_(unittest.TestCase):
    """Integration test: run the real `microsoft/layoutlmv3-base` checkpoint on a fixture image."""

    @cached_property
    def default_image_processor(self):
        # Restored name: `self.default_image_processor` is accessed in the test below.
        # OCR is disabled because the test supplies its own token ids and boxes.
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''')

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='''tf''').pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the hidden states
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1E-4))
| 60 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
    from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        '''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'''
    )
    # Fall back to the slow tokenizer when the fast one is unavailable.
    # (Restored: this was mangled into an unrelated module constant, leaving
    # `LlamaTokenizerFast` undefined on the import-failure path even though
    # `write_tokenizer` below checks `LlamaTokenizerFast is None`.)
    LlamaTokenizerFast = None

# Hard-coded FFN (intermediate) sizes per LLaMA model size.
INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
# Number of checkpoint shards each model size ships with.
# (Restored name: `write_model` below reads `NUM_SHARDS[model_size]`.)
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    """Return the LLaMA FFN hidden size for model dimension *n*.

    Applies the 8n/3 rule, scales by *ffn_dim_multiplier*, and rounds up to the
    nearest multiple of *multiple_of*. (Restored the name used by the call site
    in `write_model` and de-duplicated the mangled parameter names, which were
    a SyntaxError.)
    """
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
def read_json(path):
    """Load and return the JSON document at *path* (name restored to match the
    existing call site in `write_model`)."""
    with open(path, "r") as f:
        return json.load(f)
def write_json(text, path):
    """Serialize *text* as JSON to *path* (name restored to match the existing
    call site in `write_model`; the mangled duplicate parameter names were a
    SyntaxError)."""
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    """Convert a raw LLaMA checkpoint under *input_base_path* into a
    Transformers checkpoint saved at *model_path*.

    NOTE(review): every local name in the original was mangled (all assignments
    bound `_a`, all parameters shared one name — a SyntaxError). Names were
    reconstructed from right-hand sides and later uses; review against the
    upstream `convert_llama_weights_to_hf.py` script.
    """
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)
            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )
        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))

    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }
    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}  # 2 bytes/param (fp16 serialization below)
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    # `torch.floataa` in the mangled source is corruption of `torch.float16`.
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    """Save a Llama tokenizer built from the sentencepiece model at
    *input_tokenizer_path* into *tokenizer_path* (name restored to match the
    call in `main`; mangled duplicate parameters were a SyntaxError)."""
    # Initialize the tokenizer based on the `spm` model; prefer the fast tokenizer when importable.
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    """CLI entry point: convert LLaMA weights and/or tokenizer to the HF format.

    Restored the name `main` (the `__main__` guard below already calls `main()`)
    and the local names `parser`/`args`/`spm_path` lost in mangling.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    # NOTE(review): the original `type=` argument was mangled away; `type=bool`
    # matches the upstream script — confirm.
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)


if __name__ == "__main__":
    main()
| 276 | 0 |
"""simple docstring"""
from collections import namedtuple
_a = namedtuple('from_to', 'from_ to')
_a = {
'cubicmeter': from_to(1, 1),
'litre': from_to(0.001, 1_000),
'kilolitre': from_to(1, 1),
'gallon': from_to(0.0_0454, 264.172),
'cubicyard': from_to(0.7_6455, 1.3_0795),
'cubicfoot': from_to(0.028, 35.3147),
'cup': from_to(0.0_0023_6588, 4226.75),
}
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
if from_type not in METRIC_CONVERSION:
raise ValueError(
f"""Invalid 'from_type' value: {from_type!r} Supported values are:\n"""
+ ", ".join(__lowerCamelCase ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
f"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
+ ", ".join(__lowerCamelCase ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
| 61 |
'''simple docstring'''
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    """Shared assertions: *dataset* is a 4x3 `Dataset` whose columns have the
    dtypes in *expected_features* (name restored to match the existing call
    sites; the mangled duplicate parameter names were a SyntaxError)."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    # NOTE(review): parameter/fixture names were mangled into duplicates (a
    # SyntaxError); they are reconstructed from the body and the parametrize
    # marker — confirm the fixture names against conftest.py.
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    # NOTE(review): fixture/parameter names reconstructed after mangling —
    # confirm against conftest.py.
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    """Yield every row of the `dataset` table in the SQLite database at *sqlite_path*."""
    # Local import: the module-level `import sqlitea` is a corruption of the
    # stdlib `sqlite3` module name.
    import sqlite3

    # `contextlib.closing` guarantees the connection is closed even on error.
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Round-trip: read a SQL dataset, write it back (single process), compare rows."""
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Same round-trip as above, but writing with two worker processes."""
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """num_proc=0 is invalid and must raise ValueError.

    (Restored `pytest.raises(ValueError)`: the expected exception type was
    mangled into an undefined name.)
    """
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
| 276 | 0 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    """Convert a TensorFlow ALBERT checkpoint to a PyTorch state dict.

    Name and parameter names restored to match the existing CLI call site at
    the bottom of the file (the mangled signature duplicated one parameter
    name, a SyntaxError).
    """
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--albert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained ALBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_A = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 62 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Restored distinct names: both statements were mangled to the same binding,
# so the logger object was immediately clobbered by the URL map.
# NOTE(review): the map's name follows the transformers convention — confirm
# against the package's __init__ re-exports.
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class A__(PretrainedConfig):
    """Configuration class for a data2vec-text model.

    NOTE(review): the base class is restored to `PretrainedConfig` (imported at
    the top of this file; the mangled base name was undefined), and the
    `__init__` parameter names — which were all mangled to one duplicated name,
    a SyntaxError — are reconstructed from the attribute assignments in the body.
    """

    # Key used by PretrainedConfig / auto classes for model-type registration.
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class A__(OnnxConfig):
    """ONNX export configuration for data2vec-text.

    NOTE(review): the base class is restored to `OnnxConfig` (imported at the
    top of this file) and the property is renamed to `inputs`, the hook
    OnnxConfig subclasses override — confirm against the OnnxConfig API.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra `choice` axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ]
        )
| 276 | 0 |
'''simple docstring'''
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Return the fractional part of *number*, rounded to *digit_amount*
    decimal places when *digit_amount* > 0 (unrounded otherwise).

    Name and parameters restored to match the demo call sites below; the
    mangled signature duplicated one parameter name (a SyntaxError) and the
    body rounded the wrong variable.
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 63 |
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
A__: Union[str, Any] = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale an absolute (x0, y0, x1, y1) pixel box into the 0-1000 coordinate
    space LayoutLM expects (name restored to match the call in
    `apply_tesseract`; the mangled duplicate parameters were a SyntaxError)."""
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Run Tesseract OCR on *image* and return ``(words, normalized_boxes)``.

    Empty words (and their coordinates) are filtered out, and every box is
    normalized to the 0-1000 LayoutLM coordinate space. (Local names were
    reconstructed after mangling; the duplicate parameter names were a
    SyntaxError.)
    """
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    # (a set makes each membership test below O(1) instead of O(n))
    irrelevant_indices = {idx for idx, word in enumerate(words) if not word.strip()}
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])

    # finally, normalize the bounding boxes
    normalized_boxes = [normalize_box(box, image_width, image_height) for box in actual_boxes]

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : List[Any] = ["pixel_values"]
def __init__(
    self,
    do_resize: bool = True,
    size: Dict[str, int] = None,
    resample: PILImageResampling = PILImageResampling.BILINEAR,
    apply_ocr: bool = True,
    ocr_lang: Optional[str] = None,
    tesseract_config: Optional[str] = "",
    **kwargs,
) -> None:
    """Store the resize/OCR settings for this image processor.

    NOTE(review): parameter names reconstructed from the attribute assignments
    (the mangled signature duplicated one name, a SyntaxError); default values
    are taken verbatim from the original signature.
    """
    super().__init__(**kwargs)
    # Default target size is 224x224 when the caller does not specify one.
    size = size if size is not None else {"height": 224, "width": 224}
    size = get_size_dict(size)

    self.do_resize = do_resize
    self.size = size
    self.resample = resample
    self.apply_ocr = apply_ocr
    self.ocr_lang = ocr_lang
    self.tesseract_config = tesseract_config
def resize(
    self,
    image: np.ndarray,
    size: Dict[str, int],
    resample: PILImageResampling = PILImageResampling.BILINEAR,
    data_format: Optional[Union[str, ChannelDimension]] = None,
    **kwargs,
) -> np.ndarray:
    """Resize *image* to ``size`` (a dict with "height" and "width" keys).

    Raises ValueError when either key is missing. The unqualified ``resize``
    call below resolves to the module-level helper imported from
    ``image_transforms``, not to this method.
    """
    size = get_size_dict(size)
    if "height" not in size or "width" not in size:
        raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
    output_size = (size["height"], size["width"])
    return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :ImageInput , SCREAMING_SNAKE_CASE :bool = None , SCREAMING_SNAKE_CASE :Dict[str, int] = None , SCREAMING_SNAKE_CASE :PILImageResampling = None , SCREAMING_SNAKE_CASE :bool = None , SCREAMING_SNAKE_CASE :Optional[str] = None , SCREAMING_SNAKE_CASE :Optional[str] = None , SCREAMING_SNAKE_CASE :Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE :ChannelDimension = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE :Optional[Any] , ) -> PIL.Image.Image:
'''simple docstring'''
_a : Optional[int] =do_resize if do_resize is not None else self.do_resize
_a : Optional[int] =size if size is not None else self.size
_a : str =get_size_dict(SCREAMING_SNAKE_CASE )
_a : List[str] =resample if resample is not None else self.resample
_a : int =apply_ocr if apply_ocr is not None else self.apply_ocr
_a : str =ocr_lang if ocr_lang is not None else self.ocr_lang
_a : Union[str, Any] =tesseract_config if tesseract_config is not None else self.tesseract_config
_a : List[str] =make_list_of_images(SCREAMING_SNAKE_CASE )
if not valid_images(SCREAMING_SNAKE_CASE ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
# All transformations expect numpy arrays.
_a : List[Any] =[to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images]
if apply_ocr:
requires_backends(self , """pytesseract""" )
_a : Any =[]
_a : Any =[]
for image in images:
_a , _a : int =apply_tesseract(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
words_batch.append(SCREAMING_SNAKE_CASE )
boxes_batch.append(SCREAMING_SNAKE_CASE )
if do_resize:
_a : Union[str, Any] =[self.resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
_a : Dict =[flip_channel_order(SCREAMING_SNAKE_CASE ) for image in images]
_a : str =[to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images]
_a : str =BatchFeature(data={"""pixel_values""": images} , tensor_type=SCREAMING_SNAKE_CASE )
if apply_ocr:
_a : List[Any] =words_batch
_a : Dict =boxes_batch
return data
| 276 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
A_ = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 64 |
"""Fetch the current top stories from Hacker News via its public Firebase API."""
from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    """Return the JSON payload for a single Hacker News item."""
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Return up to ``max_stories`` of the current top story items."""
    url = """https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"""
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    """Render the top stories as a markdown bullet list of ``[title](url)`` links."""
    stories = hackernews_top_stories(max_stories)
    return "\n".join("""* [{title}]({url})""".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
| 276 | 0 |
import math


def lowerCAmelCase_(__A: int) -> bool:
    """Return True if ``__A`` is a perfect square.

    Uses ``math.isqrt`` for an exact integer check: ``math.sqrt`` is a lossy
    float operation and mis-classifies large integers. Raises ValueError for
    negative input (same as ``math.sqrt``).
    """
    return math.isqrt(__A) ** 2 == __A
def lowerCAmelCase_(__A: int) -> bool:
    """Return True if ``__A`` is a perfect square, via binary search on the root.

    Searches ``[0, __A]`` for an integer whose square equals ``__A``; runs in
    O(log n). Negative input returns False (the search range is empty).
    """
    n = __A
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput( UpperCAmelCase__ ):
    """Output of the VQ model's encode step.

    The class and field names are fixed by their use sites: the encoder
    constructs ``VQEncoderOutput(latents=...)`` and callers read ``.latents``.
    """

    # Pre-quantization latent feature map produced by the encoder.
    latents: torch.FloatTensor
class A__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
    """VQ-VAE style model: Encoder -> 1x1 conv -> VectorQuantizer -> 1x1 conv -> Decoder."""

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (6_4,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 3_2,
        num_vq_embeddings: int = 2_5_6,
        norm_num_groups: int = 3_2,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18_215,
        norm_type: str = "group",
    ) -> None:
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,  # VQ latents are a single tensor, not mean/logvar pairs
        )
        # Quantizer embedding width defaults to the latent channel count.
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        """Encode ``x`` into pre-quantization latents."""
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        """Decode latents ``h`` back to an image; quantizes first unless disabled."""
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant_post = self.post_quant_conv(quant)
        # "spatial" norm decoders additionally condition on the quantized latents.
        dec = self.decoder(quant_post, quant if self.config.norm_type == """spatial""" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        """Full round trip: encode, quantize, decode."""
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
| 276 | 0 |
"""simple docstring"""
def A_(length: int = 50) -> int:
    """Count the ways to fill a row of ``length`` units with black squares and
    red blocks of length >= 3, where red blocks are separated by at least one
    black square (Project Euler 114/115 style recurrence).
    """
    # ways_number[r] = number of fillings of a row of length r (rows < 3 have 1).
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            # Block placed at each start that leaves at least one trailing cell.
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            # The block can also sit flush against the right edge.
            ways_number[row_length] += 1
    return ways_number[length]


solution = A_  # backward-compatible alias used by the __main__ guard


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 66 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    """Builds tiny EsmConfig/model instances plus shared shape checks.

    Named ``EsmModelTester`` because the test class below instantiates it by
    that name in ``setUp``.
    """

    def __init__(
        self,
        parent,
        batch_size=1_3,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=3_3,
        hidden_size=3_2,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=3_7,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2,
        type_vocab_size=1_6,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random input ids/masks/labels and a matching config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Repackage prepare_config_and_inputs() output into the (config, inputs_dict)
        shape expected by the common ModelTesterMixin machinery."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite model tests for ESM, driven by EsmModelTester.

    Class attributes below are the configuration contract read by
    ModelTesterMixin / PipelineTesterMixin.
    """

    test_mismatched_shapes = False
    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=3_7)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        """Padding tokens must keep padding_idx as their position id; real tokens
        get sequential positions offset past padding_idx."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)
        input_ids = torch.as_tensor([[1_2, 3_1, 1_3, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        """Position ids derived from raw embeddings are simply sequential."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)
        inputs_embeds = torch.empty(2, 4, 3_0)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("""Esm does not support embedding resizing""")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("""Esm does not support embedding resizing""")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    """Slow integration tests against the published facebook/esm2_t6_8M_UR50D checkpoint."""

    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]
            vocab_size = 3_3
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)
            # Reference logits recorded from the released checkpoint.
            expected_slice = torch.tensor(
                [[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 276 | 0 |
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
# The menu class below reads `in_colab` to decide between raw-key navigation
# and plain numeric input; default to False when the probe fails.
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class a__ :
    """Interactive bullet menu: navigate with arrow/number keys, confirm with enter.

    In Colab (``in_colab``) falls back to plain numeric ``input()``.
    """

    def __init__( self, prompt: str = None, choices: list = [] ):
        """Store the prompt and choices; start with the cursor on item 0."""
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = '''*'''
        else:
            self.arrow_char = '''➔ '''

    def write_choice( self, index: int, end: str = "" ):
        """Write choice `index`; colored green where the terminal supports it."""
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice( self, index: int ):
        """Print choice `index`, with the arrow marker when it is selected."""
        if index == self.position:
            forceWrite(f""" {self.arrow_char} """)
            self.write_choice(index)
        else:
            forceWrite(f""" {self.choices[index]}""")
        reset_cursor()

    def move_direction( self, direction: Direction, num_spaces: int = 1 ):
        """Move the cursor `num_spaces` rows in `direction`, redrawing both rows."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP['''up'''])
    def move_up( self ):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP['''down'''])
    def move_down( self ):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP['''newline'''])
    def select( self ):
        """Confirm: move the cursor past the menu and return the selection."""
        move_cursor(len(self.choices) - self.position, '''DOWN''')
        return self.position

    @input.mark(KEYMAP['''interrupt'''])
    def interrupt( self ):
        move_cursor(len(self.choices) - self.position, '''DOWN''')
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row( self ):
        """Jump directly to the row whose digit key was pressed."""
        # NOTE(review): `current_selection` is presumably set by the
        # `input.register` key-handling machinery — confirm against that module.
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run( self, default_choice: int = 0 ):
        """Draw the menu, loop on input, and return the chosen index."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, '''\n''')
        if in_colab:
            forceWrite('''Please input a choice index (starting from 0), and press enter''', '''\n''')
        else:
            forceWrite('''Please select a choice using the arrow or number keys, and selecting with enter''', '''\n''')
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite('''\n''')
        move_cursor(len(self.choices) - self.position, '''UP''')
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    # handle_input is provided by the @input.register machinery.
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    # Erase the rendered menu before echoing the final choice.
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, '''UP''')
                        clear_line()
                    self.write_choice(choice, '''\n''')
                    return choice
| 67 |
'''simple docstring'''
from math import isqrt
def is_prime(number: int) -> bool:
    """Trial-division primality test; numbers below 2 are not prime."""
    if number < 2:
        return False
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below ``max_prime`` that are differences of consecutive
    cubes: (n+1)^3 - n^3 = 3n^2 + 3n + 1, giving the sequence 7, 19, 37, ..."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # 2^3 - 1^3
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        # Step to the next cube difference: the increment grows by 6 each time.
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


SCREAMING_SNAKE_CASE_ = solution  # backward-compatible alias for the old name


if __name__ == "__main__":
    print(F"{solution() = }")
| 276 | 0 |
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int = 6_0_0_8_5_1_4_7_5_1_4_3 ) -> int:
    '''Return the largest prime factor of the argument (Project Euler problem 3).

    Raises:
        TypeError: if the argument cannot be converted to int.
        ValueError: if the argument is not positive.
    '''
    try:
        n = int(SCREAMING_SNAKE_CASE_)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # Advance to the next factor of n; it is necessarily prime because all
        # smaller factors have already been divided out.
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


solution = lowerCAmelCase__  # alias matching the __main__ guard below


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 68 |
'''Deprecated re-export shim for the Stable Diffusion ControlNet pipeline.'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401

# Emit a standard deprecation warning whenever this module is imported.
deprecate(
    '''stable diffusion controlnet''',
    '''0.22.0''',
    '''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
    standard_warn=False,
    stacklevel=3,
)
| 276 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
    """Tests for MgpstrProcessor: save/load round-trips, tokenizer/image-processor
    delegation, and batch decoding."""

    # NOTE(review): kept the original attribute name; upstream calls this
    # `image_processing_class` — confirm before relying on it.
    SCREAMING_SNAKE_CASE_ = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        # NOTE(review): `image_processor_tester` is not set anywhere in this
        # class — presumably provided by a mixin; verify against the caller.
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        """Write a character vocab and an image-processor config into a tmp dir
        that get_tokenizer/get_image_processor load from."""
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        image_processor_map = {
            'do_normalize': False,
            'do_resize': True,
            'image_processor_type': 'ViTImageProcessor',
            'resample': 3,
            'size': {'height': 32, 'width': 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create one random PIL image (HWC after the axis move)."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'test'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'test'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'labels'])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predictions = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predictions)
        decoded_tok = tokenizer.batch_decode(predictions)
        decode_strs = [seq.replace(' ', '') for seq in decoded_tok]
        self.assertListEqual(decode_strs, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 5_0257)
        wp_input = torch.randn(1, 27, 3_0522)
        results = processor.batch_decode([char_input, bpe_input, wp_input])
        self.assertListEqual(list(results.keys()), ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'])
| 69 |
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        # one shard per job when jobs == shards
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    """_distribute_shards splits `num_shards` into at most `max_num_jobs` contiguous ranges."""
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    """_split_gen_kwargs partitions list-valued gen_kwargs across at most max_num_jobs jobs."""
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        # two list arguments of different lengths are ambiguous -> RuntimeError
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    """Shard count is the length of the list-valued kwargs; mismatched lists raise."""
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 276 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module-level names restored: the tokenizer class below references `logger`,
# `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`, which the obfuscated original all
# bound to the single name `A__`.
logger = logging.get_logger(__name__)

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 52_42_88,
}
class UpperCAmelCase(PreTrainedTokenizer):
    """
    Reformer tokenizer backed by SentencePiece.

    Fixes over the obfuscated original: base class restored to
    `PreTrainedTokenizer` (was the undefined `snake_case_`), duplicate
    parameter names removed, instance attributes assigned to `self.*`
    (they were bound to the throwaway `_lowerCAmelCase`), hook method
    names restored so the base class can dispatch to them, and
    `sp_model.encode(..., out_type=str)` restored (the text argument had
    been duplicated into `out_type`).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        """
        Args:
            vocab_file: Path to the SentencePiece model file.
            eos_token / unk_token: Special token strings.
            additional_special_tokens: Extra special tokens to register.
            sp_model_kwargs: Extra kwargs forwarded to SentencePieceProcessor.
        """
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload on setstate.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility with pickles created before sp_model_kwargs existed
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into SentencePiece pieces."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Convert an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens back into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the SentencePiece model file into `save_directory` and return its path."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # No file on disk: serialize the in-memory model instead.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 70 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Restored canonical module-level names (both were bound to `A__`).
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class A__(PretrainedConfig):
    """
    Configuration class for RoCBert models (`model_type = "roc_bert"`).

    Fixes over the obfuscated original: base class restored to
    `PretrainedConfig` (was the undefined `UpperCAmelCase__`); the
    constructor's parameters, which had all been renamed to the single
    (syntactically invalid, duplicated) identifier `SCREAMING_SNAKE_CASE`,
    are restored to the names the body actually assigns from.
    """

    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=2_4858,
        concat_input=True,
        **kwargs,
    ):
        """Store the standard BERT-style hyperparameters plus the RoCBert
        pronunciation/shape embedding options."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 276 | 0 |
def multiplicative_persistence(num: int) -> int:
    """Return the multiplicative persistence of `num`: the number of times its
    digits must be multiplied together until a single digit remains.

    Fixes over the obfuscated original: `isinstance(num, int)` restored
    (`isinstance(a_, a_)` raised TypeError for every input because arg 2 must
    be a type), and the function renamed so it no longer collides with the
    additive variant (both were named `A`).

    >>> multiplicative_persistence(217)
    2
    """
    if not isinstance(num, int):
        raise ValueError('multiplicative_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('multiplicative_persistence() does not accept negative values')

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)
        steps += 1
    return steps
def additive_persistence(num: int) -> int:
    """Return the additive persistence of `num`: the number of times its digits
    must be summed until a single digit remains.

    Fixes over the obfuscated original: `isinstance(num, int)` restored
    (`isinstance(a_, a_)` raised TypeError for every input), and the function
    renamed so it no longer collides with the multiplicative variant.

    >>> additive_persistence(199)
    3
    """
    if not isinstance(num, int):
        raise ValueError('additive_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('additive_persistence() does not accept negative values')

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 71 |
'''simple docstring'''
class A__:
    """Undirected weighted graph stored as a nested adjacency mapping
    (``adjacency[u][v] == weight``, both directions kept in sync).

    Fixes over the obfuscated original: method names restored to the names
    their own call sites use (`add_vertex`, `get_edges`, ... — all defs had
    been collapsed onto one colliding identifier), and assignments restored
    to their real targets (they were bound to the throwaway `_a`).
    """

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Register `vertex` if it is not already present."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Add an undirected edge head<->tail with the given weight (self-loops ignored)."""
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Rewrite edge weights so that all edges have pairwise-distinct weights,
        preserving their relative order (useful before Borůvka's algorithm)."""
        edges = self.get_edges()
        # Drop one direction of each symmetric pair.
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))

        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])

        # Bump any weight that ties (or inverts) with its predecessor.
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1

        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self) -> str:
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Return all directed (tail, head, weight) triples (each undirected edge twice)."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Return a view of the vertex set."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Construct a graph from optional vertex and (head, tail, weight) edge lists."""
        g = A__()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
class A__:
    """Disjoint-set (union-find) with path compression and union-by-rank,
    plus Borůvka's minimum-spanning-tree algorithm as a static helper.

    Fixes over the obfuscated original: method names restored to the names
    their call sites use (`make_set`, `find`, `union` — all defs collided on
    one identifier); the two union roots, which had both been renamed
    `roota`, are separated again; and the unresolvable `Graph.UnionFind()` /
    `Graph.build()` references are replaced with this class and the passed
    graph's own class (the enclosing `Graph` name does not exist in this
    file).
    """

    def __init__(self):
        self.parent = {}
        self.rank = {}

    def __len__(self):
        return len(self.parent)

    def make_set(self, item):
        """Create a singleton set for `item` (idempotent) and return its root."""
        if item in self.parent:
            return self.find(item)

        self.parent[item] = item
        self.rank[item] = 0
        return item

    def find(self, item):
        """Return the representative of `item`'s set, compressing paths."""
        if item not in self.parent:
            return self.make_set(item)
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item])
        return self.parent[item]

    def union(self, item1, item2):
        """Merge the sets containing item1 and item2; return the new root."""
        root1 = self.find(item1)
        root2 = self.find(item2)

        if root1 == root2:
            return root1

        if self.rank[root1] > self.rank[root2]:
            self.parent[root2] = root1
            return root1

        if self.rank[root1] < self.rank[root2]:
            self.parent[root1] = root2
            return root2

        if self.rank[root1] == self.rank[root2]:
            self.rank[root1] += 1
            self.parent[root2] = root1
            return root1
        return None

    @staticmethod
    def boruvka(graph):
        """Compute a minimum spanning tree of `graph` (distinct edge weights
        assumed) using Borůvka's algorithm; returns a new graph of MST edges."""
        num_components = graph.num_vertices

        union_find = A__()
        mst_edges = []
        while num_components > 1:
            # Cheapest outgoing edge per component root, or -1 if none seen yet.
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            # Drop one direction of each symmetric pair.
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)

                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        # Build the MST with whatever graph class the caller supplied.
        mst = graph.__class__.build(edges=mst_edges)
        return mst
| 276 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import structure for the ViTMAE model family.
# Fixes over the obfuscated original: the mapping is bound to
# `_import_structure` (which `_LazyModule` below actually reads — it had been
# bound to `lowerCAmelCase__`), the per-backend lists are inserted under their
# module keys instead of overwriting the mapping, and the final statement
# replaces this module in `sys.modules` with the lazy module as intended.
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # PyTorch backend missing: expose only the configuration.
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # TensorFlow backend missing.
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    # At runtime, replace this module with a lazy loader.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 72 |
'''simple docstring'''
from datetime import datetime

import requests
from bs4 import BeautifulSoup  # fix: package is `bs4`, not `bsa`

if __name__ == "__main__":
    # Download the og:image of a web page to a timestamped .jpg file.
    # Fixes over the obfuscated original: the import typo `bsa` -> `bs4`, and
    # the locals (url, soup, ...) restored — every assignment had been bound
    # to `A__` while later lines referenced the real names.
    url = input('Enter image url: ').strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('meta', {'property': 'og:image'})['content']
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, 'wb') as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
| 276 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
# Type variable for the element type of the tree (restored: it had been bound
# to `a` while the class references `T`).
T = TypeVar("T")


class A_(Generic[T]):
    """Non-recursive (iterative) segment tree combining elements with a
    user-supplied associative function `fnc`.

    Fixes over the obfuscated original: duplicate parameter names removed
    (a SyntaxError), instance attributes (`self.N`, `self.st`, `self.fn`)
    restored — they were bound to a throwaway local — and method names
    restored to the names their call sites use (`self.build()`, `.update`,
    `.query`).
    """

    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]):
        """Build the tree over `arr`; internal nodes live at st[1..N-1],
        leaves at st[N..2N-1]."""
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self):
        """Fill internal nodes bottom-up from the leaves."""
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T):
        """Set element `p` (0-based) to `v` and refresh its ancestors."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """Combine elements over the inclusive index range [l, r]."""
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
    # Self-test harness: compare segment-tree range queries against reduce()
    # over the raw array, before and after a batch of point updates.
    # Fixes over the obfuscated original: every binding had collapsed onto
    # the single name `a`; distinct names restored and the tree class
    # referenced by its actual name in this file (`A_`).
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = A_(test_array, min)
    max_segment_tree = A_(test_array, max)
    sum_segment_tree = A_(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Check every [i, j] range against the brute-force reduce answer."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
| 73 |
'''simple docstring'''
# Notebook-generation configuration (Italian docs).
# Fix: the first cell references `INSTALL_CONTENT`, which the obfuscated
# original had bound to `A__`; canonical names restored for all three.
INSTALL_CONTENT = '''
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
'''

notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
| 276 | 0 |
"""simple docstring"""
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    """Query the GitHub API for self-hosted runner status and fail if any
    runner in `target_runners` is offline.

    Fixes over the obfuscated original: locals restored (every assignment was
    bound to `A` while later lines referenced `output`, `status`, ...),
    `shell=True` restored for the curl command string, and the function
    renamed to match its call site at the bottom of the file.

    Raises:
        ValueError: listing the offline runners, if any were found.
    """
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        ' https://api.github.com/repos/huggingface/transformers/actions/runners'
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode('utf-8')
    status = json.loads(o)

    runners = status['runners']
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open('offline_runners.txt', 'w') as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = '\n'.join([x['name'] for x in offline_runners])
        raise ValueError(f'The following runners are offline:\n{failed}')
if __name__ == "__main__":
    # CLI entry point. Fixes over the obfuscated original: `list_str` and
    # `parser`/`args` restored — the defs/assignments had been renamed while
    # the call sites (`type=list_str`, `parser.add_argument`, `args.token`)
    # kept the original names.
    def list_str(values):
        """argparse type: split a comma-separated string into a list."""
        return values.split(',')

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--target_runners',
        default=None,
        type=list_str,
        required=True,
        help='Comma-separated list of runners to check status.',
    )

    parser.add_argument(
        '--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
'''simple docstring'''
# Month "doomsday" lookup tables for leap and non-leap years, and weekday names.
# (Restored names: all three constants had been bound to `A__` while the
# function below references them by their canonical names.)
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]

WEEK_DAY_NAMES = {
    0: 'Sunday',
    1: 'Monday',
    2: 'Tuesday',
    3: 'Wednesday',
    4: 'Thursday',
    5: 'Friday',
    6: 'Saturday',
}


def SCREAMING_SNAKE_CASE_(year: int, month: int, day: int) -> str:
    """Return the weekday name of a Gregorian date using Conway's Doomsday rule.

    Fixes: distinct parameter names restored (the original duplicated one
    name three times, a SyntaxError) and the leap-century test corrected —
    a century year is a common year when ``year % 400 != 0`` (the original
    ``== 0`` misclassified e.g. the leap year 2000 as non-leap).

    >>> SCREAMING_SNAKE_CASE_(2020, 10, 24)
    'Saturday'
    """
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
'''simple docstring'''
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
a_ : Tuple = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
a_ : Dict = direct_transformers_import(PATH_TO_TRANSFORMERS)
a_ : Any = transformers.models.auto.configuration_auto.CONFIG_MAPPING
a_ : Tuple = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (despite we don't have training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def a_ ( __snake_case : Any , __snake_case : Tuple , __snake_case : Dict , __snake_case : str ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ =False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F'''config.{attribute}''' in modeling_source
or F'''getattr(config, "{attribute}"''' in modeling_source
or F'''getattr(self.config, "{attribute}"''' in modeling_source
):
lowerCamelCase_ =True
# Deal with multi-line cases
elif (
re.search(
rF'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''' , __snake_case , )
is not None
):
lowerCamelCase_ =True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
lowerCamelCase_ =True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
lowerCamelCase_ =[
'''bos_index''',
'''eos_index''',
'''pad_index''',
'''unk_index''',
'''mask_index''',
'''image_size''',
'''use_cache''',
'''out_features''',
'''out_indices''',
]
lowerCamelCase_ =['''encoder_no_repeat_ngram_size''']
# Special cases to be allowed
lowerCamelCase_ =True
if not attribute_used:
lowerCamelCase_ =False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
lowerCamelCase_ =True
elif attribute in ["tie_word_embeddings"] and default_value is False:
lowerCamelCase_ =True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
lowerCamelCase_ =True
elif attribute.endswith('''_token_id''' ):
lowerCamelCase_ =True
# configuration class specific cases
if not case_allowed:
lowerCamelCase_ =SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
lowerCamelCase_ =allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def a_ ( __snake_case : Dict ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ =dict(inspect.signature(config_class.__init__ ).parameters )
lowerCamelCase_ =[x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']]
lowerCamelCase_ =[signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
lowerCamelCase_ ={}
if len(config_class.attribute_map ) > 0:
lowerCamelCase_ ={v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
lowerCamelCase_ =inspect.getsourcefile(__snake_case )
lowerCamelCase_ =os.path.dirname(__snake_case )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
lowerCamelCase_ =[os.path.join(__snake_case , __snake_case ) for fn in os.listdir(__snake_case ) if fn.startswith('''modeling_''' )]
# Get the source code strings
lowerCamelCase_ =[]
for path in modeling_paths:
if os.path.isfile(__snake_case ):
with open(__snake_case ) as fp:
modeling_sources.append(fp.read() )
lowerCamelCase_ =[]
for config_param, default_value in zip(__snake_case , __snake_case ):
# `attributes` here is all the variant names for `config_param`
lowerCamelCase_ =[config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(__snake_case , __snake_case , __snake_case , __snake_case ):
unused_attributes.append(attributes[0] )
return sorted(__snake_case )
def a_ ( ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ ={}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
lowerCamelCase_ =[
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda __snake_case : inspect.isclass(__snake_case )
and issubclass(__snake_case , __snake_case )
and inspect.getmodule(__snake_case ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
lowerCamelCase_ =check_config_attributes_being_used(__snake_case )
if len(__snake_case ) > 0:
lowerCamelCase_ =unused_attributes
if len(__snake_case ) > 0:
lowerCamelCase_ ='''The following configuration classes contain unused attributes in the corresponding modeling files:\n'''
for name, attributes in configs_with_unused_attributes.items():
error += F'''{name}: {attributes}\n'''
raise ValueError(__snake_case )
if __name__ == "__main__":
check_config_attributes()
| 75 |
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    """Result of the Burrows-Wheeler transform.

    Keys (grounded by the construction in ``bwt_transform`` below):
      - ``bwt_string``: last column of the sorted rotation matrix.
      - ``idx_original_string``: index of the original string in the sorted rotations.
    """

    bwt_string: str
    idx_original_string: int


# Keep the obfuscated alias so any reference to the original class name still works.
A__ = BWTTransformDict
def all_rotations(s: str) -> list[str]:
    """Return all cyclic rotations of *s* (rotation i is s[i:] + s[:i]).

    Raises:
        TypeError: if *s* is not a str.
    """
    # Bug fix: the original tested isinstance(s, s), which itself raises
    # TypeError("isinstance() arg 2 must be a type") for every valid string.
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    """Compute the Burrows-Wheeler transform of *s*.

    Returns a dict with the transformed string (last column of the sorted
    rotation matrix) and the index of the original string among the sorted
    rotations, which is needed to invert the transform.

    Raises:
        TypeError: if *s* is not a str.
        ValueError: if *s* is empty.
    """
    if not isinstance(s, str):  # bug fix, same broken isinstance as above
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Invert the Burrows-Wheeler transform.

    Repeatedly prepends the BWT column to the (re-sorted) rotation table until
    the full rotation matrix is rebuilt, then returns the row at
    *idx_original_string*.

    Raises:
        TypeError: if *bwt_string* is not a str, or the index is not castable to int.
        ValueError: if *bwt_string* is empty or the index is out of range.
    """
    # Bug fixes vs the original: the two parameters were both named
    # `_UpperCAmelCase` (a SyntaxError), and the isinstance check compared the
    # value against itself instead of against `str`.
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int.")
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string).")
    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            # Prepend this round's BWT character to each partial rotation.
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    # Interactive demo: read a string, print its BWT, then invert it back.
    # NOTE(review): the obfuscation collapsed the assignment targets into `A__`;
    # the reads below show they were intended to be `entry_msg`, `s`, `result`
    # and `original_string` — verify before running.
    A__: Any = '''Provide a string that I will generate its BWT transform: '''
    A__: Union[str, Any] = input(entry_msg).strip()
    A__: Optional[int] = bwt_transform(s)
    print(
        F"Burrows Wheeler transform for string '{s}' results "
        F"in '{result['bwt_string']}'"
    )
    A__: Union[str, Any] = reverse_bwt(result['''bwt_string'''], result['''idx_original_string'''])
    print(
        F"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        F"we get original string '{original_string}'"
    )
| 276 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def get_deta_config(model_name):
    """Build the DetaConfig (Swin-large backbone + label maps) for *model_name*.

    Fixes vs the obfuscated original: the parameter was named `_a` while the
    body read `model_name`, and the `config.num_labels` / `id2label` /
    `label2id` assignment targets were destroyed. Names below are reconstructed
    from the body's reads; `def` name matches the call site in
    `convert_deta_checkpoint`.
    """
    # Swin-large backbone exposing the last three stages as feature maps.
    backbone_config = SwinConfig(
        embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=["stage2", "stage3", "stage4"] , )
    config = DetaConfig(
        backbone_config=backbone_config , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=True , with_box_refine=True , two_stage=True , )
    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"
    config.num_labels = num_labels
    idalabel = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="dataset")) , "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel  # NOTE(review): upstream attribute is `id2label` — confirm
    config.labelaid = {v: k for k, v in idalabel.items()}  # NOTE(review): upstream is `label2id` — confirm
    return config
def create_rename_keys(config):
    """Return the (old_key, new_key) pairs mapping original DETA checkpoint
    names to HuggingFace model names.

    Fixes vs the obfuscated original: the parameter was named `_a` while the
    body read `config` (NameError), and the initial `rename_keys = []` target
    was destroyed; `def` name matches the call site in `convert_deta_checkpoint`.
    """
    rename_keys = []
    # stem
    # fmt: off
    rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight"))
    rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias"))
        # only the first three stages have a patch-merging (downsample) layer
        if i < 3:
            rename_keys.append((f"backbone.0.body.layers.{i}.downsample.reduction.weight", f"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight"))
            rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.weight", f"model.backbone.model.encoder.layers.{i}.downsample.norm.weight"))
            rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.bias", f"model.backbone.model.encoder.layers.{i}.downsample.norm.bias"))
    rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight"))
    rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias"))
    rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight"))
    rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias"))
    rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight"))
    rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias"))
    # transformer encoder
    for i in range(config.encoder_layers):
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", f"model.encoder.layers.{i}.self_attn.sampling_offsets.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", f"model.encoder.layers.{i}.self_attn.sampling_offsets.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", f"model.encoder.layers.{i}.self_attn.attention_weights.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", f"model.encoder.layers.{i}.self_attn.attention_weights.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.weight", f"model.encoder.layers.{i}.self_attn.value_proj.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.bias", f"model.encoder.layers.{i}.self_attn.value_proj.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.weight", f"model.encoder.layers.{i}.self_attn.output_proj.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.bias", f"model.encoder.layers.{i}.self_attn.output_proj.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.norm1.weight", f"model.encoder.layers.{i}.self_attn_layer_norm.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"model.encoder.layers.{i}.self_attn_layer_norm.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"model.encoder.layers.{i}.fc1.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"model.encoder.layers.{i}.fc1.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"model.encoder.layers.{i}.fc2.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"model.encoder.layers.{i}.fc2.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"model.encoder.layers.{i}.final_layer_norm.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"model.encoder.layers.{i}.final_layer_norm.bias"))
    # transformer decoder
    for i in range(config.decoder_layers):
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", f"model.decoder.layers.{i}.encoder_attn.attention_weights.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", f"model.decoder.layers.{i}.encoder_attn.attention_weights.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", f"model.decoder.layers.{i}.encoder_attn.value_proj.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", f"model.decoder.layers.{i}.encoder_attn.value_proj.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", f"model.decoder.layers.{i}.encoder_attn.output_proj.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", f"model.decoder.layers.{i}.encoder_attn.output_proj.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm1.weight", f"model.decoder.layers.{i}.encoder_attn_layer_norm.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"model.decoder.layers.{i}.encoder_attn_layer_norm.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"model.decoder.layers.{i}.self_attn.out_proj.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"model.decoder.layers.{i}.self_attn.out_proj.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm2.weight", f"model.decoder.layers.{i}.self_attn_layer_norm.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm2.bias", f"model.decoder.layers.{i}.self_attn_layer_norm.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"model.decoder.layers.{i}.fc1.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"model.decoder.layers.{i}.fc1.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"model.decoder.layers.{i}.fc2.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"model.decoder.layers.{i}.fc2.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"model.decoder.layers.{i}.final_layer_norm.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"model.decoder.layers.{i}.final_layer_norm.bias"))
    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    """Rename key *old* of mapping *dct* to *new*, keeping its value.

    Fixes vs the obfuscated original: all three parameters were named `_a`
    (a SyntaxError) and the `dct[new] = val` target was destroyed; `def` name
    matches the call site in `convert_deta_checkpoint`.
    """
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    """Split each Swin block's fused qkv projection into separate HF
    query/key/value entries, in place.

    Fixes vs the obfuscated original: duplicate `_a` parameters (SyntaxError)
    and destroyed `in_proj_*` / target-key assignments, reconstructed from the
    slicing pattern in the body; `def` name matches the call site in
    `convert_deta_checkpoint`.
    """
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[: dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[
                -dim :, :
            ]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    """Split each decoder self-attention fused in_proj into separate HF
    q/k/v projection entries, in place.

    Fixes vs the obfuscated original: duplicate `_a` parameters (SyntaxError)
    and destroyed assignment targets, reconstructed from the slicing pattern;
    `def` name matches the call site in `convert_deta_checkpoint`.
    """
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[
            hidden_size : hidden_size * 2, :
        ]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    """Download the standard COCO cats verification image as a PIL Image.

    Fixes vs the obfuscated original: `stream=_a` (undefined) restored to
    `stream=True` and destroyed local targets reconstructed; `def` name
    matches the call site in `convert_deta_checkpoint`.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Convert an original DETA checkpoint to the HuggingFace format, verify
    its outputs on a test image, and optionally save / push the result.

    Fixes vs the obfuscated original: three parameters all named `_a`
    (SyntaxError) — intended names recovered from the body's reads — and
    destroyed local assignment targets reconstructed the same way. `def` name
    matches the call in the `__main__` block.
    """
    config = get_deta_config(model_name)
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints" , filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365" , filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")
    state_dict = torch.load(checkpoint_path , map_location="cpu")["model"]
    # original state dict
    for name, param in state_dict.items():
        print(name , param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest)
    read_in_swin_q_k_v(state_dict , config.backbone_config)
    read_in_decoder_q_k_v(state_dict , config)
    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    # load image processor
    processor = DetaImageProcessor(format="coco_detection")
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img , return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))
    # verify logits
    print("Logits:" , outputs.logits[0, :3, :3])
    print("Boxes:" , outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]])
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]])
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(device) , atol=1E-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(device) , atol=1E-4)
    print("Everything ok!")
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the DETA checkpoint conversion.
    # NOTE(review): the obfuscation collapsed the parser/args assignment targets
    # into `a_`; the reads below show they were intended to be `parser` and
    # `args` — verify before running.
    a_ = argparse.ArgumentParser()
    parser.add_argument(
        '--model_name',
        type=str,
        default='deta-swin-large',
        choices=['deta-swin-large', 'deta-swin-large-o365'],
        help='Name of the model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        help='Path to the folder to output PyTorch model.',
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    a_ = parser.parse_args()
    convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import scaffolding for the Chinese-CLIP model family: the dict maps each
# submodule to the symbols it exports, and `_LazyModule` defers the real imports
# until first attribute access. Under TYPE_CHECKING the symbols are imported
# eagerly so static analyzers can resolve them.
# NOTE(review): the obfuscation collapsed several distinct assignment targets
# into `A__` — the dict below is passed to `_LazyModule` as `_import_structure`
# (undefined here), and the later `A__` assignments were list-append/extend
# targets on that dict — reconcile with the upstream `__init__.py`.
A__: List[str] = {
    '''configuration_chinese_clip''': [
        '''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''ChineseCLIPConfig''',
        '''ChineseCLIPOnnxConfig''',
        '''ChineseCLIPTextConfig''',
        '''ChineseCLIPVisionConfig''',
    ],
    '''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
# Vision-dependent exports are only registered when PIL/vision extras exist.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A__: Optional[int] = ['''ChineseCLIPFeatureExtractor''']
    A__: Any = ['''ChineseCLIPImageProcessor''']
# Torch-dependent (modeling) exports are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A__: Dict = [
        '''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ChineseCLIPModel''',
        '''ChineseCLIPPreTrainedModel''',
        '''ChineseCLIPTextModel''',
        '''ChineseCLIPVisionModel''',
    ]
if TYPE_CHECKING:
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )
else:
    # At runtime, replace this module object with the lazy proxy.
    import sys
    A__: str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 276 | 0 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase_(ProcessorMixin):
    """CLIP-style processor that wraps an image processor and a tokenizer.

    Fixes vs the obfuscated original: the base class `_a` was undefined
    (ProcessorMixin is imported above), `__init__`/`__call__` had several
    parameters all named `a` (a SyntaxError), and class-attribute / encoding
    assignment targets were destroyed. Names are reconstructed from the body's
    reads and from ProcessorMixin's contract.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs) -> None:
        feature_extractor = None
        # Accept the deprecated `feature_extractor` kwarg as an alias.
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize *text* and/or preprocess *images*; when both are given the
        pixel values are merged into the text encoding."""
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names(self):
        # Union of tokenizer and image-processor input names, order-preserving.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
| 77 |
'''simple docstring'''
class TrieNode:
    """A node of a character trie supporting insert, lookup and delete.

    Fixes vs the obfuscated original: `__init__` assigned to throwaway locals
    instead of `self.nodes` / `self.is_leaf`, all methods were mangled to the
    same name `__UpperCAmelCase`, and several loop/recursion targets were
    destroyed; method names are recovered from the call sites below
    (`insert`, `insert_many`, `find`, `delete`), and the class is named
    `TrieNode` because later functions reference it by that name.
    """

    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        """Insert every word of *words* into the trie rooted at self."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert *word*, creating intermediate nodes as needed."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Return True iff *word* was inserted (prefixes alone don't match)."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Remove *word* from the trie, pruning nodes that become childless."""

        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


# Preserve the original (obfuscated) class name for any external references.
A__ = TrieNode
def print_words(node: "TrieNode", word: str) -> None:
    """Depth-first print of every stored word under *node*, each followed by a
    space; *word* is the prefix accumulated so far.

    Fixes vs the obfuscated original: both parameters were named
    `_UpperCAmelCase` (a SyntaxError); the `def` name matches the recursive
    call the original body already made.
    """
    if node.is_leaf:
        print(word, end=""" """)
    for key, value in node.nodes.items():
        print_words(value, word + key)
def test_trie() -> bool:
    """Self-test: insert a word list, check positive/negative lookups, and
    verify deletion prunes exactly the deleted word.

    Fixes vs the obfuscated original: the `words`/`root` assignment targets
    were destroyed and `root.find(_UpperCAmelCase)` should read `word`; the
    `def` name matches the call sites in the pytest/main wrappers below.
    """
    words = """banana bananas bandana band apple all beast""".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("""banana""")
    assert not root.find("""bandanas""")
    assert not root.find("""apps""")
    assert root.find("""apple""")
    assert root.find("""all""")
    root.delete("""all""")
    assert not root.find("""all""")
    root.delete("""banana""")
    assert not root.find("""banana""")
    assert root.find("""bananas""")
    return True
def print_results(msg: str, passes: bool) -> None:
    """Print *msg* followed by a pass/fail marker.

    Fixes vs the obfuscated original: both parameters were named
    `_UpperCAmelCase` (a SyntaxError); the `def` name matches the call site in
    the `main` wrapper below.
    """
    print(str(msg), """works!""" if passes else """doesn't work :(""")
def SCREAMING_SNAKE_CASE_ ( ) -> None:
    # Pytest-style entry: fail loudly if the trie self-test does not pass.
    # NOTE(review): `test_trie` is not defined under that name in this file's
    # current (obfuscated) state — the definition above appears to have been
    # renamed; reconcile before running.
    assert test_trie()
def main() -> None:
    """Entry point: run the trie self-test and report the outcome.

    Fix vs the obfuscated original: the guard below calls `main()`, but the
    function definition had been renamed to `SCREAMING_SNAKE_CASE_` — restored.
    """
    print_results("""Testing trie functionality""" , test_trie() )


if __name__ == "__main__":
    main()
| 276 | 0 |
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
snake_case_ = logging.get_logger(__name__)
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None ):
    """Set one named parameter/buffer of a (possibly bitsandbytes-quantized)
    module onto a device, rebuilding Int8/4-bit params when required.

    NOTE(review): the obfuscation mangled all parameters to duplicates of
    `lowercase_` (a SyntaxError) and most assignment targets to
    `UpperCAmelCase`; from the body's reads the intended signature is
    `(module, tensor_name, device, value=None, fp16_statistics=None)` and the
    intended targets include `splits`, `new_module`, `is_buffer`, `old_value`,
    `is_4bit`, `is_8bit`, `param`, `new_value` and `kwargs` — confirm against
    the upstream `transformers.utils.bitsandbytes` before relying on this.
    """
    # Recurse if needed
    if "." in tensor_name:
        UpperCAmelCase = tensor_name.split('.' )
        for split in splits[:-1]:
            UpperCAmelCase = getattr(lowercase_ , lowercase_ )
            if new_module is None:
                raise ValueError(F"""{module} has no attribute {split}.""" )
            UpperCAmelCase = new_module
        UpperCAmelCase = splits[-1]
    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(F"""{module} does not have a parameter or a buffer named {tensor_name}.""" )
    UpperCAmelCase = tensor_name in module._buffers
    UpperCAmelCase = getattr(lowercase_ , lowercase_ )
    if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None:
        raise ValueError(F"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" )
    UpperCAmelCase = False
    UpperCAmelCase = False
    # Quantized handling only applies to real parameters when bitsandbytes exists.
    if is_buffer or not is_bitsandbytes_available():
        UpperCAmelCase = False
        UpperCAmelCase = False
    else:
        UpperCAmelCase = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
        UpperCAmelCase = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
    if is_abit or is_abit:
        UpperCAmelCase = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                UpperCAmelCase = old_value.to(lowercase_ )
            elif isinstance(lowercase_ , torch.Tensor ):
                UpperCAmelCase = value.to('cpu' )
                if value.dtype == torch.inta:
                    # int8 serialization needs a recent-enough bitsandbytes.
                    UpperCAmelCase = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse(
                        '0.37.2' )
                    if not is_abit_serializable:
                        raise ValueError(
                            'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
                            'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' )
            else:
                UpperCAmelCase = torch.tensor(lowercase_ , device='cpu' )
            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls , lowercase_ ) and fpaa_statistics is None:
                UpperCAmelCase = new_value.T
            UpperCAmelCase = old_value.__dict__
            if is_abit:
                UpperCAmelCase = bnb.nn.IntaParams(lowercase_ , requires_grad=lowercase_ , **lowercase_ ).to(lowercase_ )
            elif is_abit:
                UpperCAmelCase = bnb.nn.Paramsabit(lowercase_ , requires_grad=lowercase_ , **lowercase_ ).to(lowercase_ )
            UpperCAmelCase = new_value
            if fpaa_statistics is not None:
                setattr(module.weight , 'SCB' , fpaa_statistics.to(lowercase_ ) )
    else:
        # Non-quantized path: plain tensor/buffer move.
        if value is None:
            UpperCAmelCase = old_value.to(lowercase_ )
        elif isinstance(lowercase_ , torch.Tensor ):
            UpperCAmelCase = value.to(lowercase_ )
        else:
            UpperCAmelCase = torch.tensor(lowercase_ , device=lowercase_ )
        if is_buffer:
            UpperCAmelCase = new_value
        else:
            UpperCAmelCase = nn.Parameter(lowercase_ , requires_grad=old_value.requires_grad )
            UpperCAmelCase = new_value
def _lowerCAmelCase ( lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=False ):
    """Recursively replace eligible nn.Linear / Conv1D children of *model* with
    bitsandbytes 8-bit or 4-bit linear layers, returning (model, has_been_replaced).

    NOTE(review): the obfuscation mangled all parameters to duplicates of
    `lowercase_` (a SyntaxError); from the body's reads and the recursive call
    the intended signature is `(model, modules_to_not_convert=None,
    current_key_name=None, quantization_config=None, has_been_replaced=False)`
    and the `UpperCAmelCase` targets were the corresponding locals — confirm
    against upstream before relying on this.
    """
    for name, module in model.named_children():
        if current_key_name is None:
            UpperCAmelCase = []
        current_key_name.append(lowercase_ )
        if (isinstance(lowercase_ , nn.Linear ) or isinstance(lowercase_ , lowercase_ )) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in '.'.join(lowercase_ ) for key in modules_to_not_convert ):
                with init_empty_weights():
                    if isinstance(lowercase_ , lowercase_ ):
                        UpperCAmelCase , UpperCAmelCase = module.weight.shape
                    else:
                        UpperCAmelCase = module.in_features
                        UpperCAmelCase = module.out_features
                    if quantization_config.quantization_method() == "llm_int8":
                        UpperCAmelCase = bnb.nn.LinearabitLt(
                            lowercase_ , lowercase_ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
                        UpperCAmelCase = True
                    else:
                        if (
                            quantization_config.llm_inta_skip_modules is not None
                            and name in quantization_config.llm_inta_skip_modules
                        ):
                            pass
                        else:
                            UpperCAmelCase = bnb.nn.Linearabit(
                                lowercase_ , lowercase_ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
                            UpperCAmelCase = True
                    # Store the module class in case we need to transpose the weight later
                    UpperCAmelCase = type(lowercase_ )
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(lowercase_ )
        if len(list(module.children() ) ) > 0:
            UpperCAmelCase , UpperCAmelCase = _replace_with_bnb_linear(
                lowercase_ , lowercase_ , lowercase_ , lowercase_ , has_been_replaced=lowercase_ , )
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    """Public wrapper: swap the model's linear layers for bitsandbytes
    quantized ones, defaulting to keeping `lm_head` in full precision, and
    warn if nothing was replaced.

    Fixes vs the obfuscated original: all four parameters were named
    `lowercase_` (a SyntaxError) and the local assignment targets were
    destroyed; the `def` name is grounded by the deprecated wrapper below,
    which calls `replace_with_bnb_linear` by name.
    """
    modules_to_not_convert = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model , modules_to_not_convert , current_key_name , quantization_config )
    if not has_been_replaced:
        logger.warning(
            'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
            ' Please double check your model architecture, or submit an issue on github if you think this is'
            ' a bug.' )
    return model
def replace_8bit_linear(*args, **kwargs):
    """Deprecated alias for `replace_with_bnb_linear` (kept for backward
    compatibility).

    Fixes vs the obfuscated original: `*lowercase_, **lowercase_` duplicated a
    parameter name (SyntaxError), and the warning category argument was
    destroyed — restored to FutureWarning per the deprecation message; the
    `def` name is taken from the warning text itself.
    """
    warnings.warn(
        '`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , FutureWarning , )
    return replace_with_bnb_linear(*args , **kwargs )
def _lowerCAmelCase ( *args , **kwargs ):
    """Deprecated alias: emit a FutureWarning and forward to
    `set_module_quantized_tensor_to_device`.

    Fixes: `*lowercase_ , **lowercase_` duplicated the parameter name (SyntaxError),
    and the `warnings.warn` category slot received that parameter instead of a
    Warning subclass.
    """
    warnings.warn(
        '`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , FutureWarning , )
    return set_module_quantized_tensor_to_device(*args , **kwargs )
def _lowerCAmelCase ( lowercase_ ):
    """Return module names (output head / tied weights) to keep in full precision.

    NOTE(review): an automated rename collapsed every assignment target into
    `UpperCAmelCase`, while later lines read the original locals (`tied_model`,
    `tied_params`, `model`, `has_tied_params`, ...) that are never bound — this
    function raises NameError as written; the bindings must be restored.
    """
    UpperCAmelCase = deepcopy(lowercase_ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()
    UpperCAmelCase = find_tied_parameters(lowercase_ )
    # For compatibility with Accelerate < 0.18
    if isinstance(lowercase_ , lowercase_ ):
        UpperCAmelCase = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        UpperCAmelCase = sum(lowercase_ , [] )
    UpperCAmelCase = len(lowercase_ ) > 0
    # Check if it is a base model
    UpperCAmelCase = not hasattr(lowercase_ , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    UpperCAmelCase = list(model.named_children() )
    UpperCAmelCase = [list_modules[-1][0]]
    # add last module together with tied weights
    UpperCAmelCase = set(lowercase_ ) - set(lowercase_ )
    UpperCAmelCase = list(set(lowercase_ ) ) + list(lowercase_ )
    # remove ".weight" from the keys
    UpperCAmelCase = ['.weight', '.bias']
    UpperCAmelCase = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                UpperCAmelCase = name.replace(lowercase_ , '' )
        filtered_module_names.append(lowercase_ )
    return filtered_module_names
| 78 |
'''Lazy import structure for the GPT-SW3 tokenizer (requires sentencepiece).'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# NOTE(review): the original `_import_structure` dict was renamed to `A__`, yet the
# `_LazyModule(...)` call below still references `_import_structure`, which is
# undefined as written — confirm and restore a single consistent name.
A__: str = {}
try:
    # Only expose the tokenizer when sentencepiece is installed.
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A__: Tuple = ['''GPTSw3Tokenizer''']
if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # Static-analysis-only import; skipped at runtime.
        from .tokenization_gpt_swa import GPTSwaTokenizer
else:
    import sys
    # NOTE(review): this should be assigned into `sys.modules[__name__]` for the
    # lazy-module pattern to take effect — confirm against the upstream template.
    A__: str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 276 | 0 |
'''Check that the TF ops used by a SavedModel are all supported by a given ONNX opset.'''
import argparse
import json
import os
# NOTE(review): module name looks mangled — the protobuf module is normally
# `saved_model_pb2`, not `saved_model_pba`; confirm against the TF install.
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo
# (the script-name mention below looks stale — this file is the ONNX-ops checker).
# python utils/check_copies.py
lowerCamelCase_ = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
# NOTE(review): this reuses the name of the repo-path constant above and clobbers it;
# restore two distinct names (e.g. REPO_PATH / INTERNAL_OPS).
lowerCamelCase_ = [
    '''Assert''',
    '''AssignVariableOp''',
    '''EmptyTensorList''',
    '''MergeV2Checkpoints''',
    '''ReadVariableOp''',
    '''ResourceGather''',
    '''RestoreV2''',
    '''SaveV2''',
    '''ShardedFilename''',
    '''StatefulPartitionedCall''',
    '''StaticRegexFullMatch''',
    '''VarHandleOp''',
]
def __lowercase ( saved_model_path , strict , opset ) -> List[Any]:
    """Verify every op used by a TF SavedModel is supported by the given ONNX opset.

    Fixes: the three parameters all shared the name `__lowercase` (SyntaxError);
    the strict-mode `raise` concatenated a str with a list (TypeError); the
    opset-table loop indexed with the parameter instead of the loop counter.

    Args:
        saved_model_path: path to the `.pb` SavedModel file to inspect.
        strict: if True, raise on incompatible ops instead of only printing them.
        opset: highest ONNX opset whose op tables count as supported.

    Raises:
        Exception: when `strict` is True and incompatible ops were found.
    """
    saved_model = SavedModel()
    onnx_ops = []
    # NOTE(review): the original referenced a repo-path constant that the
    # surrounding file clobbers; "." matches its initial value — confirm.
    with open(os.path.join("." , "utils" , "tf_ops" , "onnx.json" ) ) as f:
        onnx_opsets = json.load(f )["opsets"]
    # Every opset up to and including `opset` contributes supported ops.
    for i in range(1 , opset + 1 ):
        onnx_ops.extend(onnx_opsets[str(i )] )
    with open(saved_model_path , "rb" ) as f:
        saved_model.ParseFromString(f.read() )
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node )
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def )
    # Convert to a sorted list for deterministic reporting
    model_op_names = sorted(model_op_names )
    incompatible_ops = [op for op in model_op_names if op not in onnx_ops and op not in INTERNAL_OPS]
    if strict and len(incompatible_ops ) > 0:
        # Bug fix: the message used to be concatenated with the list itself (TypeError).
        raise Exception(
            F'''Found the following incompatible ops for the opset {opset}:\n''' + "\n".join(incompatible_ops ) )
    elif len(incompatible_ops ) > 0:
        print(F'''Found the following incompatible ops for the opset {opset}:''' )
        print(*incompatible_ops , sep="\n" )
    else:
        print(F'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the op-compatibility check.
    # NOTE(review): the parser/args are bound to the mangled name `lowerCamelCase_`
    # while the lines below read `parser` and `args`, and `onnx_compliancy` is not
    # defined under that name in this file (the function above is `__lowercase`) —
    # restore the original bindings.
    lowerCamelCase_ = argparse.ArgumentParser()
    parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
    parser.add_argument(
        '''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
    )
    parser.add_argument(
        '''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
    )
    parser.add_argument(
        '''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
    )
    lowerCamelCase_ = parser.parse_args()
    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 79 |
'''Utilities for loading models with bitsandbytes 8-bit / 4-bit quantization.'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
# bitsandbytes pulls in torch, so only import that stack when it is installed.
if is_bitsandbytes_available():
    import bitsandbytes as bnb
    import torch
    import torch.nn as nn
    # NOTE(review): `ConvaD` looks like a mangled `Conv1D` — confirm the import name.
    from ..pytorch_utils import ConvaD
if is_accelerate_available():
    from accelerate import init_empty_weights
    from accelerate.utils import find_tied_parameters
# Module-level logger (name mangled by the same rewrite pass).
A__: str = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : int ,_UpperCAmelCase : int=None ,_UpperCAmelCase : Optional[Any]=None ) -> Optional[Any]:
    """Place a (possibly quantized) tensor onto `module.<tensor_name>` on a target device.

    NOTE(review): the obfuscation pass declared all five parameters with the same
    name (a SyntaxError) and collapsed every assignment target into `_a`, while
    later lines read the original locals (`tensor_name`, `splits`, `new_module`,
    `old_value`, `param`, `new_value`, ...) — the original (module, tensor_name,
    device, value=None, fp16_statistics=None) signature and bindings must be
    restored before this can run.
    """
    # Recurse if needed
    if "." in tensor_name:
        # Walk dotted paths ("encoder.layer.0.weight") down to the owning module.
        _a : Union[str, Any] =tensor_name.split(""".""" )
        for split in splits[:-1]:
            _a : Optional[Any] =getattr(_UpperCAmelCase ,_UpperCAmelCase )
            if new_module is None:
                raise ValueError(F"{module} has no attribute {split}." )
            _a : Optional[int] =new_module
        _a : Optional[int] =splits[-1]
    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(F"{module} does not have a parameter or a buffer named {tensor_name}." )
    _a : Optional[Any] =tensor_name in module._buffers
    _a : str =getattr(_UpperCAmelCase ,_UpperCAmelCase )
    # A meta tensor has no data, so moving it off "meta" requires an explicit value.
    if old_value.device == torch.device("""meta""" ) and device not in ["meta", torch.device("""meta""" )] and value is None:
        raise ValueError(F"{tensor_name} is on the meta device, we need a `value` to put in on {device}." )
    _a : int =False
    _a : Tuple =False
    if is_buffer or not is_bitsandbytes_available():
        _a : str =False
        _a : Optional[Any] =False
    else:
        # Detect whether the existing parameter is already a bnb 4-bit/8-bit param.
        _a : int =hasattr(bnb.nn ,"""Params4bit""" ) and isinstance(module._parameters[tensor_name] ,bnb.nn.Paramsabit )
        _a : int =isinstance(module._parameters[tensor_name] ,bnb.nn.IntaParams )
    if is_abit or is_abit:
        _a : Any =module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                _a : int =old_value.to(_UpperCAmelCase )
            elif isinstance(_UpperCAmelCase ,torch.Tensor ):
                _a : str =value.to("""cpu""" )
                if value.dtype == torch.inta:
                    # int8 serialization only works with recent bitsandbytes releases.
                    _a : int =version.parse(importlib.metadata.version("""bitsandbytes""" ) ) > version.parse(
                        """0.37.2""" )
                    if not is_abit_serializable:
                        raise ValueError(
                            """Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. """
                            """Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.""" )
            else:
                _a : Dict =torch.tensor(_UpperCAmelCase ,device="""cpu""" )
            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls ,_UpperCAmelCase ) and fpaa_statistics is None:
                _a : int =new_value.T
            _a : Any =old_value.__dict__
            if is_abit:
                _a : Any =bnb.nn.IntaParams(_UpperCAmelCase ,requires_grad=_UpperCAmelCase ,**_UpperCAmelCase ).to(_UpperCAmelCase )
            elif is_abit:
                _a : Union[str, Any] =bnb.nn.Paramsabit(_UpperCAmelCase ,requires_grad=_UpperCAmelCase ,**_UpperCAmelCase ).to(_UpperCAmelCase )
            _a : List[Any] =new_value
            if fpaa_statistics is not None:
                # Attach precomputed scaling stats for serialized int8 checkpoints.
                setattr(module.weight ,"""SCB""" ,fpaa_statistics.to(_UpperCAmelCase ) )
    else:
        # Non-quantized path: plain tensor/parameter move.
        if value is None:
            _a : str =old_value.to(_UpperCAmelCase )
        elif isinstance(_UpperCAmelCase ,torch.Tensor ):
            _a : Any =value.to(_UpperCAmelCase )
        else:
            _a : str =torch.tensor(_UpperCAmelCase ,device=_UpperCAmelCase )
        if is_buffer:
            _a : Optional[int] =new_value
        else:
            _a : Optional[Any] =nn.Parameter(_UpperCAmelCase ,requires_grad=old_value.requires_grad )
            _a : Tuple =new_value
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Tuple ,_UpperCAmelCase : Union[str, Any]=None ,_UpperCAmelCase : List[Any]=None ,_UpperCAmelCase : str=None ,_UpperCAmelCase : Union[str, Any]=False ) -> Dict:
    """Recursively replace eligible linear submodules with bnb quantized linears.

    Returns a `(model, has_been_replaced)` pair.

    NOTE(review): the five parameters share one mangled name (SyntaxError) and
    assignment targets were collapsed into `_a`; the originals
    (model, modules_to_not_convert, current_key_name, quantization_config,
    has_been_replaced=False) and the locals read below must be restored.
    """
    for name, module in model.named_children():
        if current_key_name is None:
            _a : Optional[int] =[]
        current_key_name.append(_UpperCAmelCase )
        if (isinstance(_UpperCAmelCase ,nn.Linear ) or isinstance(_UpperCAmelCase ,_UpperCAmelCase )) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in """.""".join(_UpperCAmelCase ) for key in modules_to_not_convert ):
                # Build the replacement layer with empty weights; real weights are set later.
                with init_empty_weights():
                    if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
                        _a , _a : int =module.weight.shape
                    else:
                        _a : List[str] =module.in_features
                        _a : Tuple =module.out_features
                    if quantization_config.quantization_method() == "llm_int8":
                        _a : Optional[Any] =bnb.nn.LinearabitLt(
                            _UpperCAmelCase ,_UpperCAmelCase ,module.bias is not None ,has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight ,threshold=quantization_config.llm_inta_threshold ,)
                        _a : Optional[Any] =True
                    else:
                        if (
                            quantization_config.llm_inta_skip_modules is not None
                            and name in quantization_config.llm_inta_skip_modules
                        ):
                            pass
                        else:
                            _a : Dict =bnb.nn.Linearabit(
                                _UpperCAmelCase ,_UpperCAmelCase ,module.bias is not None ,quantization_config.bnb_abit_compute_dtype ,compress_statistics=quantization_config.bnb_abit_use_double_quant ,quant_type=quantization_config.bnb_abit_quant_type ,)
                            _a : List[Any] =True
                    # Store the module class in case we need to transpose the weight later
                    _a : int =type(_UpperCAmelCase )
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(_UpperCAmelCase )
        if len(list(module.children() ) ) > 0:
            # Recurse into container modules.
            _a , _a : List[Any] =_replace_with_bnb_linear(
                _UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,has_been_replaced=_UpperCAmelCase ,)
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def SCREAMING_SNAKE_CASE_ ( model ,modules_to_not_convert=None ,current_key_name=None ,quantization_config=None ):
    """Convert a model's linear layers to bitsandbytes quantized linears.

    Fixes: the mangled signature declared all four parameters with the same name
    (`_UpperCAmelCase`), which is a SyntaxError, and `has_been_replaced` was read
    without ever being bound.

    Args:
        model: model whose submodules are converted in place.
        modules_to_not_convert: module names kept in full precision
            (defaults to `["lm_head"]`).
        current_key_name: recursion accumulator for the current module path.
        quantization_config: bitsandbytes quantization settings.

    Returns:
        The (possibly modified) model.
    """
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model ,has_been_replaced = _replace_with_bnb_linear(
        model ,modules_to_not_convert ,current_key_name ,quantization_config )
    if not has_been_replaced:
        # Best-effort diagnostic only: the unconverted model is still returned.
        logger.warning(
            """You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
            """ Please double check your model architecture, or submit an issue on github if you think this is"""
            """ a bug.""" )
    return model
def SCREAMING_SNAKE_CASE_ ( *args ,**kwargs ) -> str:
    """Deprecated alias: emit a FutureWarning and forward to `replace_with_bnb_linear`.

    Fixes: `*_UpperCAmelCase ,**_UpperCAmelCase` duplicated the parameter name
    (SyntaxError), and the `warnings.warn` category slot received that parameter
    instead of a Warning subclass.
    """
    warnings.warn(
        """`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead""" ,FutureWarning ,)
    return replace_with_bnb_linear(*args ,**kwargs )
def SCREAMING_SNAKE_CASE_ ( *args ,**kwargs ) -> Optional[int]:
    """Deprecated alias: emit a FutureWarning and forward to
    `set_module_quantized_tensor_to_device`.

    Fixes: `*_UpperCAmelCase ,**_UpperCAmelCase` duplicated the parameter name
    (SyntaxError), and the `warnings.warn` category slot received that parameter
    instead of a Warning subclass.
    """
    warnings.warn(
        """`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead""" ,FutureWarning ,)
    return set_module_quantized_tensor_to_device(*args ,**kwargs )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> Union[str, Any]:
    """Return module names (output head / tied weights) to keep in full precision.

    NOTE(review): assignment targets were collapsed into `_a`, while later lines
    read the original locals (`tied_model`, `tied_params`, `model`,
    `has_tied_params`, `list_modules`, ...) that are never bound — restore them.
    """
    _a : Any =deepcopy(_UpperCAmelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()
    _a : List[Any] =find_tied_parameters(_UpperCAmelCase )
    # For compatibility with Accelerate < 0.18
    if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
        _a : str =sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() )
    else:
        _a : Optional[int] =sum(_UpperCAmelCase ,[] )
    _a : List[Any] =len(_UpperCAmelCase ) > 0
    # Check if it is a base model
    _a : Tuple =not hasattr(_UpperCAmelCase ,model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    _a : List[Any] =list(model.named_children() )
    _a : Dict =[list_modules[-1][0]]
    # add last module together with tied weights
    _a : List[str] =set(_UpperCAmelCase ) - set(_UpperCAmelCase )
    _a : str =list(set(_UpperCAmelCase ) ) + list(_UpperCAmelCase )
    # remove ".weight" from the keys
    _a : List[Any] =[""".weight""", """.bias"""]
    _a : Any =[]
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                _a : Any =name.replace(_UpperCAmelCase ,"""""" )
        filtered_module_names.append(_UpperCAmelCase )
    return filtered_module_names
| 276 | 0 |
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def _UpperCamelCase ( __A ) -> Union[str, Any]:
    """Factory for the `env` CLI subcommand; the parsed-args argument is unused.

    NOTE(review): `EnvironmentCommand` is not defined under that name in this file
    (the class below was renamed to `lowercase_`) — confirm the intended target.
    """
    return EnvironmentCommand()
class lowercase_ ( a__ ):
    """CLI subcommand that collects and prints environment/version info for bug reports.

    NOTE(review): all three methods below were renamed to the same name `__a`, so
    only the last definition survives on the class; several results are bound to
    the mangled name `UpperCamelCase__` while later lines read the original
    locals (`download_parser`, `pt_version`, `hub_version`, ...) — restore the
    original method and variable names.
    """
    @staticmethod
    def __a ( a ):
        # Register the `env` subparser and point it back at this command's factory.
        UpperCamelCase__ = parser.add_parser("env" )
        download_parser.set_defaults(func=a )
    def __a ( self ):
        # Probe each optional dependency; "not installed" is the fallback label.
        UpperCamelCase__ = huggingface_hub.__version__
        UpperCamelCase__ = "not installed"
        UpperCamelCase__ = "NA"
        if is_torch_available():
            import torch
            UpperCamelCase__ = torch.__version__
            UpperCamelCase__ = torch.cuda.is_available()
        UpperCamelCase__ = "not installed"
        if is_transformers_available():
            import transformers
            UpperCamelCase__ = transformers.__version__
        UpperCamelCase__ = "not installed"
        if is_accelerate_available():
            import accelerate
            UpperCamelCase__ = accelerate.__version__
        UpperCamelCase__ = "not installed"
        if is_xformers_available():
            import xformers
            UpperCamelCase__ = xformers.__version__
        UpperCamelCase__ = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f'''{pt_version} ({pt_cuda_available})''',
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }
        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
        print(self.format_dict(a ) )
        return info
    @staticmethod
    def __a ( a ):
        # Render the info dict as a markdown-style bullet list.
        return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 80 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
A__: int = logging.getLogger(__name__)
@dataclass
class A__ :
    """A single token-classification example.

    NOTE(review): the three fields were all renamed to `__UpperCamelCase`, so only
    one dataclass field survives; presumably these were distinct fields (an id
    string, the word sequence, and optional labels) — confirm and restore.
    """
    __UpperCamelCase : str
    __UpperCamelCase : List[str]
    __UpperCamelCase : Optional[List[str]]
@dataclass
class A__ :
    """Numeric model inputs for one example.

    NOTE(review): the four fields share one mangled name, so only a single field
    survives; presumably input ids, attention mask, optional token-type ids and
    optional label ids — confirm and restore distinct names.
    """
    __UpperCamelCase : List[int]
    __UpperCamelCase : List[int]
    __UpperCamelCase : Optional[List[int]] = None
    __UpperCamelCase : Optional[List[int]] = None
class A__ ( UpperCAmelCase__ ):
    """Dataset split identifiers; values are the conventional file-name stems.

    NOTE(review): the three members were renamed to the same attribute, so only
    the last ("test") survives; restore train/dev/test as distinct members.
    """
    __UpperCamelCase : str = "train"
    __UpperCamelCase : Tuple = "dev"
    __UpperCamelCase : str = "test"
class A__ :
    """Task interface for token classification (NER-style) datasets.

    Subclasses implement reading examples and the label list; the shared
    `convert_examples_to_features` turns word/label sequences into padded model
    inputs.

    NOTE(review): all three static methods were renamed to `__UpperCAmelCase`
    (only the last survives on the class) and every parameter shares the mangled
    name `SCREAMING_SNAKE_CASE` — a SyntaxError as written. Assignment targets
    were also collapsed into `_a` while later lines read the original locals
    (`label_map`, `features`, `tokens`, `label_ids`, ...); restore the original
    names before use.
    """
    @staticmethod
    def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Union[Split, str] ) -> List[InputExample]:
        """Read examples for a given split from a data directory (subclass hook)."""
        raise NotImplementedError
    @staticmethod
    def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :str ) -> List[str]:
        """Return the full label list for the task (subclass hook)."""
        raise NotImplementedError
    @staticmethod
    def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :List[InputExample] , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :PreTrainedTokenizer , SCREAMING_SNAKE_CASE :str=False , SCREAMING_SNAKE_CASE :Optional[Any]="[CLS]" , SCREAMING_SNAKE_CASE :Optional[int]=1 , SCREAMING_SNAKE_CASE :Any="[SEP]" , SCREAMING_SNAKE_CASE :List[Any]=False , SCREAMING_SNAKE_CASE :Union[str, Any]=False , SCREAMING_SNAKE_CASE :List[str]=0 , SCREAMING_SNAKE_CASE :str=0 , SCREAMING_SNAKE_CASE :Dict=-1_0_0 , SCREAMING_SNAKE_CASE :Optional[int]=0 , SCREAMING_SNAKE_CASE :Tuple=True , ) -> List[InputFeatures]:
        """Tokenize examples and build padded input/label-id features."""
        _a : str ={label: i for i, label in enumerate(SCREAMING_SNAKE_CASE )}
        _a : Tuple =[]
        for ex_index, example in enumerate(SCREAMING_SNAKE_CASE ):
            if ex_index % 1_0_0_0_0 == 0:
                logger.info("""Writing example %d of %d""" , SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) )
            _a : Optional[Any] =[]
            _a : List[Any] =[]
            for word, label in zip(example.words , example.labels ):
                _a : Optional[int] =tokenizer.tokenize(SCREAMING_SNAKE_CASE )
                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(SCREAMING_SNAKE_CASE ) > 0:
                    tokens.extend(SCREAMING_SNAKE_CASE )
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(SCREAMING_SNAKE_CASE ) - 1) )
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            _a : Optional[int] =tokenizer.num_special_tokens_to_add()
            if len(SCREAMING_SNAKE_CASE ) > max_seq_length - special_tokens_count:
                _a : List[Any] =tokens[: (max_seq_length - special_tokens_count)]
                _a : Tuple =label_ids[: (max_seq_length - special_tokens_count)]
            # The convention in BERT is:
            # (a) For sequence pairs:
            # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
            # (b) For single sequences:
            # tokens: [CLS] the dog is hairy . [SEP]
            # type_ids: 0 0 0 0 0 0 0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            _a : Dict =[sequence_a_segment_id] * len(SCREAMING_SNAKE_CASE )
            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                _a : Any =[cls_token] + tokens
                _a : Dict =[pad_token_label_id] + label_ids
                _a : Union[str, Any] =[cls_token_segment_id] + segment_ids
            _a : List[str] =tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE )
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            _a : Optional[int] =[1 if mask_padding_with_zero else 0] * len(SCREAMING_SNAKE_CASE )
            # Zero-pad up to the sequence length.
            _a : Union[str, Any] =max_seq_length - len(SCREAMING_SNAKE_CASE )
            if pad_on_left:
                _a : Optional[Any] =([pad_token] * padding_length) + input_ids
                _a : Optional[int] =([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                _a : Union[str, Any] =([pad_token_segment_id] * padding_length) + segment_ids
                _a : Dict =([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length
            assert len(SCREAMING_SNAKE_CASE ) == max_seq_length
            assert len(SCREAMING_SNAKE_CASE ) == max_seq_length
            assert len(SCREAMING_SNAKE_CASE ) == max_seq_length
            assert len(SCREAMING_SNAKE_CASE ) == max_seq_length
            if ex_index < 5:
                logger.info("""*** Example ***""" )
                logger.info("""guid: %s""" , example.guid )
                logger.info("""tokens: %s""" , """ """.join([str(SCREAMING_SNAKE_CASE ) for x in tokens] ) )
                logger.info("""input_ids: %s""" , """ """.join([str(SCREAMING_SNAKE_CASE ) for x in input_ids] ) )
                logger.info("""input_mask: %s""" , """ """.join([str(SCREAMING_SNAKE_CASE ) for x in input_mask] ) )
                logger.info("""segment_ids: %s""" , """ """.join([str(SCREAMING_SNAKE_CASE ) for x in segment_ids] ) )
                logger.info("""label_ids: %s""" , """ """.join([str(SCREAMING_SNAKE_CASE ) for x in label_ids] ) )
            if "token_type_ids" not in tokenizer.model_input_names:
                _a : Tuple =None
            features.append(
                InputFeatures(
                    input_ids=SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , label_ids=SCREAMING_SNAKE_CASE ) )
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class A__ ( UpperCAmelCase__ ):
    """PyTorch dataset of token-classification features, with on-disk caching.

    NOTE(review): the mangled parameters in `__init__` reuse one name
    (SyntaxError) and results are bound to `_a` while later lines read
    `cached_features_file` / `self.features` — restore the original bindings.
    """
    __UpperCamelCase : List[InputFeatures]
    # Label id ignored by the loss (matches nn.CrossEntropyLoss's default).
    __UpperCamelCase : int = nn.CrossEntropyLoss().ignore_index
    def __init__( self :Dict , SCREAMING_SNAKE_CASE :TokenClassificationTask , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :PreTrainedTokenizer , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[int] = None , SCREAMING_SNAKE_CASE :int=False , SCREAMING_SNAKE_CASE :Split = Split.train , ) -> List[str]:
        """Build or load the cached feature tensor file for one split."""
        # Load data features from cache or dataset file
        _a : Optional[Any] =os.path.join(
            SCREAMING_SNAKE_CASE , """cached_{}_{}_{}""".format(mode.value , tokenizer.__class__.__name__ , str(SCREAMING_SNAKE_CASE ) ) , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        _a : List[str] =cached_features_file + """.lock"""
        with FileLock(SCREAMING_SNAKE_CASE ):
            if os.path.exists(SCREAMING_SNAKE_CASE ) and not overwrite_cache:
                logger.info(f"Loading features from cached file {cached_features_file}" )
                _a : Any =torch.load(SCREAMING_SNAKE_CASE )
            else:
                logger.info(f"Creating features from dataset file at {data_dir}" )
                _a : Any =token_classification_task.read_examples_from_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
                # TODO clean up all this to leverage built-in features of tokenizers
                _a : List[str] =token_classification_task.convert_examples_to_features(
                    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=SCREAMING_SNAKE_CASE , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                logger.info(f"Saving features into cached file {cached_features_file}" )
                torch.save(self.features , SCREAMING_SNAKE_CASE )
    def __len__( self :Optional[int] ) -> Union[str, Any]:
        """Number of cached feature records."""
        return len(self.features )
    def __getitem__( self :Dict , SCREAMING_SNAKE_CASE :int ) -> InputFeatures:
        """Return the i-th feature record."""
        return self.features[i]
if is_tf_available():
import tensorflow as tf
class A__ :
    """TensorFlow dataset of token-classification features backed by a generator.

    NOTE(review): the mangled `__init__` parameters reuse one name (SyntaxError)
    and the built `tf.data.Dataset` is bound to `_a` while `__UpperCAmelCase`
    below reads `self.dataset` — restore the original bindings.
    """
    __UpperCamelCase : List[InputFeatures]
    # Label id ignored by the loss (-100, the standard ignore index).
    __UpperCamelCase : int = -100
    def __init__( self :str , SCREAMING_SNAKE_CASE :TokenClassificationTask , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :PreTrainedTokenizer , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[int] = None , SCREAMING_SNAKE_CASE :str=False , SCREAMING_SNAKE_CASE :Split = Split.train , ) -> Any:
        """Read examples, build features and wrap them in a tf.data.Dataset."""
        _a : Tuple =token_classification_task.read_examples_from_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        # TODO clean up all this to leverage built-in features of tokenizers
        _a : List[Any] =token_classification_task.convert_examples_to_features(
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=SCREAMING_SNAKE_CASE , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
        def gen():
            # Yield (inputs, labels) pairs; token_type_ids only when present.
            for ex in self.features:
                if ex.token_type_ids is None:
                    yield (
                        {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                        ex.label_ids,
                    )
                else:
                    yield (
                        {
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label_ids,
                    )
        if "token_type_ids" not in tokenizer.model_input_names:
            _a : Union[str, Any] =tf.data.Dataset.from_generator(
                SCREAMING_SNAKE_CASE , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa}, tf.intaa) , (
                    {"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )},
                    tf.TensorShape([None] ),
                ) , )
        else:
            _a : Union[str, Any] =tf.data.Dataset.from_generator(
                SCREAMING_SNAKE_CASE , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa}, tf.intaa) , (
                    {
                        """input_ids""": tf.TensorShape([None] ),
                        """attention_mask""": tf.TensorShape([None] ),
                        """token_type_ids""": tf.TensorShape([None] ),
                    },
                    tf.TensorShape([None] ),
                ) , )
    def __UpperCAmelCase ( self :Tuple ) -> Any:
        """Return the dataset with its cardinality asserted to len(features)."""
        _a : List[Any] =self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
        return self.dataset
    def __len__( self :str ) -> Optional[int]:
        """Number of feature records."""
        return len(self.features )
    def __getitem__( self :int , SCREAMING_SNAKE_CASE :str ) -> InputFeatures:
        """Return the i-th feature record."""
        return self.features[i]
| 276 | 0 |
"""simple docstring"""
lowerCamelCase_ : Any = [
"""DownloadConfig""",
"""DownloadManager""",
"""DownloadMode""",
"""StreamingDownloadManager""",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager | 81 |
'''simple docstring'''
from __future__ import annotations
class A__ :
def __init__( self :Union[str, Any] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :str ) -> Optional[int]:
'''simple docstring'''
_a , _a : List[str] =text, pattern
_a , _a : Union[str, Any] =len(SCREAMING_SNAKE_CASE ), len(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :str ) -> int:
'''simple docstring'''
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :int ) -> int:
'''simple docstring'''
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def __UpperCAmelCase ( self :Union[str, Any] ) -> list[int]:
'''simple docstring'''
# searches pattern in text and returns index positions
_a : Union[str, Any] =[]
for i in range(self.textLen - self.patLen + 1 ):
_a : Any =self.mismatch_in_text(SCREAMING_SNAKE_CASE )
if mismatch_index == -1:
positions.append(SCREAMING_SNAKE_CASE )
else:
_a : int =self.match_in_pattern(self.text[mismatch_index] )
_a : List[str] =(
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
# Demo driver: report every position at which `pattern` occurs in `text`.
# Fixes: every name was assigned to `A__` (clobbering the class defined above)
# and the code then read the undefined names `text`, `pattern` and `bms`,
# while `BoyerMooreSearch` is not defined under that name in this file.
text = '''ABAABA'''
pattern = '''AB'''
bms = A__(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
    print('''No match found''')
else:
    print('''Pattern found in following positions: ''')
    print(positions)
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowerCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase = BertTokenizer
__lowerCamelCase = BertTokenizerFast
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = filter_non_english
def snake_case ( self ):
"""simple docstring"""
super().setUp()
_lowerCAmelCase = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def snake_case ( self , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = """UNwant\u00E9d,running"""
_lowerCAmelCase = """unwanted, running"""
return input_text, output_text
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.tokenizer_class(self.vocab_file )
_lowerCAmelCase = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(_snake_case , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , [9, 6, 7, 12, 10, 11] )
def snake_case ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = self.get_rust_tokenizer()
_lowerCAmelCase = """UNwant\u00E9d,running"""
_lowerCAmelCase = tokenizer.tokenize(_snake_case )
_lowerCAmelCase = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
_lowerCAmelCase = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
_lowerCAmelCase = rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
self.assertListEqual(_snake_case , _snake_case )
_lowerCAmelCase = self.get_rust_tokenizer()
_lowerCAmelCase = tokenizer.encode(_snake_case )
_lowerCAmelCase = rust_tokenizer.encode(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
# With lower casing
_lowerCAmelCase = self.get_tokenizer(do_lower_case=_snake_case )
_lowerCAmelCase = self.get_rust_tokenizer(do_lower_case=_snake_case )
_lowerCAmelCase = """UNwant\u00E9d,running"""
_lowerCAmelCase = tokenizer.tokenize(_snake_case )
_lowerCAmelCase = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
_lowerCAmelCase = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
_lowerCAmelCase = rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
self.assertListEqual(_snake_case , _snake_case )
_lowerCAmelCase = self.get_rust_tokenizer()
_lowerCAmelCase = tokenizer.encode(_snake_case )
_lowerCAmelCase = rust_tokenizer.encode(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = BasicTokenizer(do_lower_case=_snake_case )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def snake_case(self):
    """Lower-casing with ``strip_accents=False`` keeps accented characters."""
    tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
    self.assertListEqual(
        tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
    )
    self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
def snake_case(self):
    """Lower-casing with ``strip_accents=True`` removes accents."""
    tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
    self.assertListEqual(
        tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
    )
    self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def snake_case(self):
    """Default accent handling when lower-casing strips accents (expected output matches strip_accents=True)."""
    tokenizer = BasicTokenizer(do_lower_case=True)
    self.assertListEqual(
        tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
    )
    self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def snake_case(self):
    """Without lower-casing the original case is preserved (expected output keeps 'HeLLo')."""
    tokenizer = BasicTokenizer(do_lower_case=False)
    self.assertListEqual(
        tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
    )
def snake_case(self):
    """No lower-casing and ``strip_accents=False`` keeps both case and accents."""
    tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
    self.assertListEqual(
        tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
    )
def snake_case(self):
    """No lower-casing but ``strip_accents=True`` removes accents while keeping case."""
    tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
    self.assertListEqual(
        tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
    )
def snake_case(self):
    """Tokens listed in ``never_split`` must come through whole (here ``[UNK]``)."""
    tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
    self.assertListEqual(
        tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
    )
def snake_case(self):
    """Every punctuation mark becomes its own token."""
    tokenizer = BasicTokenizer()
    text = "a\n'll !!to?'d of, can't."
    expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
    self.assertListEqual(tokenizer.tokenize(text), expected)
def snake_case(self):
    """WordPiece greedily matches vocabulary pieces and falls back to [UNK]."""
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

    # Fix: the obfuscated original never populated the vocab dict.
    vocab = {}
    for i, token in enumerate(vocab_tokens):
        vocab[token] = i
    tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

    self.assertListEqual(tokenizer.tokenize(""), [])
    self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
    self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
def snake_case(self):
    """Whitespace detection: space-like characters are accepted, visible ones are not."""
    for whitespace_char in (" ", "\t", "\r", "\n", "\u00A0"):
        self.assertTrue(_is_whitespace(whitespace_char))
    for visible_char in ("A", "-"):
        self.assertFalse(_is_whitespace(visible_char))
def snake_case(self):
    """Control-character detection: true only for real control codes, not whitespace."""
    self.assertTrue(_is_control("\u0005"))
    for non_control in ("A", " ", "\t", "\r"):
        self.assertFalse(_is_control(non_control))
def snake_case(self):
    """Punctuation detection: symbols yes, letters and spaces no."""
    for punct in ("-", "$", "`", "."):
        self.assertTrue(_is_punctuation(punct))
    for non_punct in ("A", " "):
        self.assertFalse(_is_punctuation(non_punct))
def snake_case(self):
    """The soft hyphen (\\xad) must be cleaned away rather than become a token."""
    tokenizer = self.get_tokenizer()
    rust_tokenizer = self.get_rust_tokenizer()

    # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
    # Fix: the loop variable ``t`` was obfuscated into an unbound ``_snake_case``.
    self.assertListEqual(
        [tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
    )
    self.assertListEqual(
        [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
    )
@slow
def snake_case(self):
    """BERT special-token layout: [CLS] text [SEP] and [CLS] text [SEP] pair [SEP]."""
    tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")

    # The asserts below reference ``text``/``text_a``, so encode without specials.
    text = tokenizer.encode("sequence builders", add_special_tokens=False)
    text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

    encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
    encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

    assert encoded_sentence == [101] + text + [102]
    assert encoded_pair == [101] + text + [102] + text_a + [102]
def snake_case(self):
    """Offset mapping must line up with the produced tokens, including the mask token."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
            # NOTE(review): boolean flags restored from the upstream test — confirm.
            tokens = tokenizer_r.encode_plus(
                sentence,
                return_attention_mask=False,
                return_token_type_ids=False,
                return_offsets_mapping=True,
                add_special_tokens=True,
            )

            do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
            expected_results = (
                [
                    ((0, 0), tokenizer_r.cls_token),
                    ((0, 1), "A"),
                    ((1, 2), ","),
                    ((3, 5), "na"),
                    ((5, 6), "##ï"),
                    ((6, 8), "##ve"),
                    ((9, 15), tokenizer_r.mask_token),
                    ((16, 21), "Allen"),
                    ((21, 23), "##NL"),
                    ((23, 24), "##P"),
                    ((25, 33), "sentence"),
                    ((33, 34), "."),
                    ((0, 0), tokenizer_r.sep_token),
                ]
                if not do_lower_case
                else [
                    ((0, 0), tokenizer_r.cls_token),
                    ((0, 1), "a"),
                    ((1, 2), ","),
                    ((3, 8), "naive"),
                    ((9, 15), tokenizer_r.mask_token),
                    ((16, 21), "allen"),
                    ((21, 23), "##nl"),
                    ((23, 24), "##p"),
                    ((25, 33), "sentence"),
                    ((33, 34), "."),
                    ((0, 0), tokenizer_r.sep_token),
                ]
            )

            self.assertEqual(
                [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
            )
            self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
def snake_case(self):
    """``tokenize_chinese_chars`` controls whether CJK characters are pre-split."""
    list_of_commun_chinese_char = ["的", "人", "有"]
    text_with_chinese_char = "".join(list_of_commun_chinese_char)
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
            kwargs["tokenize_chinese_chars"] = True
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

            ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
            ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

            tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
            tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

            # it is expected that each Chinese character is not preceded by "##"
            self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
            self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

            kwargs["tokenize_chinese_chars"] = False
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

            ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
            ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

            tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
            tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

            # it is expected that only the first Chinese character is not preceded by "##".
            expected_tokens = [
                f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
            ]
            self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
            self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 82 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'''
)
# NOTE(review): upstream sets this fallback inside the ImportError handler above;
# guard it here so a successful fast-tokenizer import is not clobbered.
if "LlamaTokenizerFast" not in globals():
    LlamaTokenizerFast = None

# Reference intermediate sizes per model size (the actual value is recomputed
# from params.json by compute_intermediate_size below).
INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
# Number of checkpoint shards each model size is split into.
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    """Return the LLaMA MLP intermediate size for hidden size *n*.

    Computes ``ffn_dim_multiplier * int(8n/3)`` rounded up to the next multiple
    of ``multiple_of``.  (The obfuscated original declared three parameters with
    the same name — a SyntaxError — and did not match its call site's name.)
    """
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
def read_json(path):
    """Load and return the JSON document stored at *path*."""
    with open(path, "r") as f:
        return json.load(f)
def write_json(text, path):
    """Serialize *text* as JSON into the file at *path*."""
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    """Convert a (possibly sharded) LLaMA checkpoint into Hugging Face format.

    Re-keyed weights are first written shard-by-shard into a ``tmp`` subfolder of
    *model_path* (to bound peak memory), then reloaded as a LlamaForCausalLM and
    re-saved in the final format.  The obfuscated original lost every assignment
    target (all were renamed ``_a``), leaving names like ``state_dict`` unbound;
    they are restored here consistently with the use sites.
    """
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)
            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )

        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))

    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }
    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    """Convert the LLaMA ``spm`` tokenizer model and save it at *tokenizer_path*."""
    # Initialize the tokenizer based on the `spm` model; prefer the fast class when available.
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    """CLI entry point: convert the weights (unless tokenizer_only) and the tokenizer."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    # NOTE(review): argparse's ``type=bool`` treats any non-empty string as True;
    # kept as in the upstream converter.
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)


if __name__ == "__main__":
    main()
| 276 | 0 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
snake_case_ : Dict = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class lowercase__(unittest.TestCase):
    """Staging-endpoint tests for pushing Flax models to the Hub.

    The obfuscated original never bound ``cls._token`` and referenced the unbound
    name ``lowerCamelCase__``; method names are restored so unittest discovers
    ``setUpClass``/``tearDownClass`` and runs the tests.
    """

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup: the repos may not exist if a test failed early.
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model_1, model_2):
    """Return True iff the two Flax models' flattened parameters agree within 1e-4.

    Fixes the obfuscated original, which flattened the same model twice and then
    compared ``flat_params_1[key]`` with itself (always equal).  Also restores
    the name used at the call sites below.
    """
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal
@require_flax
class lowercase__(unittest.TestCase):
    """Loading Flax models from local and Hub subfolders, sharded and unsharded."""

    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            # Loading from the root must fail: the weights live in the subfolder.
            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
| 83 |
'''simple docstring'''
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    """Shared assertions: the loaded object is a 4x3 Dataset with the expected feature dtypes.

    (The obfuscated original declared two parameters with the same name — a
    SyntaxError — and did not match the name used by the tests below.)
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Reading with/without keep_in_memory yields the same dataset; memory behaves accordingly."""
    # NOTE(review): the 4th fixture name is restored from the upstream test — confirm it
    # matches the fixture defined in this test suite's conftest.
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Explicitly requested features must be honored; None falls back to inferred dtypes."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    """Yield every row of the ``dataset`` table in the SQLite file at *sqlite_path*."""
    # NOTE(review): the module-level ``import sqlitea`` is a mangled ``sqlite3``;
    # import the real stdlib module locally so this helper actually works.
    import sqlite3

    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Single-process write: the written table must match the source row-for-row."""
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Writing with num_proc=2 must produce the same rows as the source."""
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """num_proc=0 is invalid and must raise."""
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    # NOTE(review): exception type restored from the upstream test (ValueError) — confirm.
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
| 276 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , ) -> Any:
lowerCAmelCase_ :int = parent
lowerCAmelCase_ :Tuple = 13
lowerCAmelCase_ :Optional[Any] = 7
lowerCAmelCase_ :List[str] = True
lowerCAmelCase_ :Union[str, Any] = True
lowerCAmelCase_ :Tuple = True
lowerCAmelCase_ :int = 99
lowerCAmelCase_ :Optional[Any] = 32
lowerCAmelCase_ :Optional[int] = 2
lowerCAmelCase_ :Optional[Any] = 4
lowerCAmelCase_ :Any = 37
lowerCAmelCase_ :List[Any] = """gelu"""
lowerCAmelCase_ :Optional[Any] = 0.1
lowerCAmelCase_ :Dict = 0.1
lowerCAmelCase_ :Union[str, Any] = 512
lowerCAmelCase_ :Union[str, Any] = 16
lowerCAmelCase_ :Optional[int] = 2
lowerCAmelCase_ :str = 0.0_2
lowerCAmelCase_ :List[Any] = 3
lowerCAmelCase_ :Union[str, Any] = 4
lowerCAmelCase_ :int = None
def prepare_config_and_inputs(self):
    """Build a tiny EsmConfig plus random input ids, mask and label tensors.

    Name restored to match the ``self.prepare_config_and_inputs()`` call below.
    """
    input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

    input_mask = None
    if self.use_input_mask:
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

    sequence_labels = None
    token_labels = None
    choice_labels = None
    if self.use_labels:
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        choice_labels = ids_tensor([self.batch_size], self.num_choices)

    config = EsmConfig(
        vocab_size=self.vocab_size,
        hidden_size=self.hidden_size,
        num_hidden_layers=self.num_hidden_layers,
        pad_token_id=1,
        num_attention_heads=self.num_attention_heads,
        intermediate_size=self.intermediate_size,
        hidden_act=self.hidden_act,
        hidden_dropout_prob=self.hidden_dropout_prob,
        attention_probs_dropout_prob=self.attention_probs_dropout_prob,
        max_position_embeddings=self.max_position_embeddings,
        type_vocab_size=self.type_vocab_size,
        initializer_range=self.initializer_range,
    )

    return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def prepare_config_and_inputs_for_decoder(self):
    """Like prepare_config_and_inputs, plus decoder flag and encoder tensors for cross-attention."""
    (
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = self.prepare_config_and_inputs()

    # NOTE(review): the obfuscated original lost this assignment target; restored
    # per the upstream tester, which marks the config as a decoder here.
    config.is_decoder = True
    encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
    encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

    return (
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A ) -> int:
lowerCAmelCase_ :Optional[Any] = TFEsmModel(config=__A )
lowerCAmelCase_ :Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask}
lowerCAmelCase_ :List[str] = model(__A )
lowerCAmelCase_ :Union[str, Any] = [input_ids, input_mask]
lowerCAmelCase_ :int = model(__A )
lowerCAmelCase_ :int = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A , __A , ) -> Any:
lowerCAmelCase_ :Optional[int] = True
lowerCAmelCase_ :Tuple = TFEsmModel(config=__A )
lowerCAmelCase_ :List[Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""encoder_hidden_states""": encoder_hidden_states,
"""encoder_attention_mask""": encoder_attention_mask,
}
lowerCAmelCase_ :Dict = model(__A )
lowerCAmelCase_ :Optional[int] = [input_ids, input_mask]
lowerCAmelCase_ :Optional[Any] = model(__A , encoder_hidden_states=__A )
# Also check the case where encoder outputs are not passed
lowerCAmelCase_ :int = model(__A , attention_mask=__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A ) -> Any:
lowerCAmelCase_ :Optional[int] = TFEsmForMaskedLM(config=__A )
lowerCAmelCase_ :Tuple = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A ) -> Optional[int]:
lowerCAmelCase_ :Dict = self.num_labels
lowerCAmelCase_ :List[str] = TFEsmForTokenClassification(config=__A )
lowerCAmelCase_ :str = {"""input_ids""": input_ids, """attention_mask""": input_mask}
lowerCAmelCase_ :Tuple = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :List[Any] = self.prepare_config_and_inputs()
(
(
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) ,
) :Optional[int] = config_and_inputs
lowerCAmelCase_ :Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test suite for the TF ESM models.

    NOTE(review): the obfuscated source listed the same base class twice
    (``A__, A__``), which raises "duplicate base class" at class creation.
    The bases are restored to the standard transformers test mixins —
    confirm the import aliases used at the top of this file.
    """

    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("""Protein models do not support embedding resizing.""")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("""Protein models do not support embedding resizing.""")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the published ESM2 8M checkpoint.

    Renamed from the obfuscated name, which shadowed the test class above
    it (two module-level classes shared one name, so the first suite was
    never collected).
    """

    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]

        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 84 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger and the pretrained-config archive map.
# The obfuscated source bound both values to the same name (``A__``),
# so the logger was immediately overwritten by the dict.
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class Data2VecTextConfig(PretrainedConfig):
    """Configuration for a data2vec text model.

    The obfuscated source repeated one parameter name for every keyword
    argument (a SyntaxError) and assigned attributes from names that were
    never bound; the signature is restored with the conventional
    transformers parameter set. ``PretrainedConfig`` is imported at the
    top of this file.
    """

    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        # Special-token ids are forwarded so the base class records them.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    """ONNX export configuration for data2vec text models.

    Renamed from the obfuscated name, which collided with the config class
    above. ``OnnxConfig`` is imported at the top of this file; the
    property must be called ``inputs`` to satisfy the OnnxConfig API.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ]
        )
| 276 | 0 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    """Helper that holds image-processor settings for the DETR tests.

    The class is renamed to match its call site (``setUp`` below builds a
    ``ConditionalDetrImageProcessingTester``); the obfuscated source also
    never assigned the constructor arguments to ``self``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # None sentinels instead of mutable list defaults.
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs used to construct the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor should produce.

        Mirrors the shortest-edge resize logic; for a batch, the maximum
        height/width over the individual images is returned (padding size).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for ConditionalDetrImageProcessor over PIL, numpy and torch inputs,
    plus slow COCO detection/panoptic annotation checks.

    The obfuscated source referenced undefined names (``a__``) for every
    local; they are restored from the mixin's conventions visible in the
    imports at the top of this file.
    """

    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1_333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39_769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822_873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 85 |
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
A__: Union[str, Any] = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale an absolute ``(x0, y0, x1, y1)`` box to the 0-1000 range used by LayoutLM.

    Renamed to match its call site in ``apply_tesseract`` below; the
    obfuscated signature repeated one parameter name (a SyntaxError) and
    the body referenced names that were never bound.
    """
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Apply Tesseract OCR to a document image.

    Returns a tuple ``(words, normalized_boxes)`` where the boxes are in
    ``(left, top, right, bottom)`` format scaled to 0-1000 via
    ``normalize_box``. Renamed to match its call site in the image
    processor's ``preprocess`` below.
    """
    tesseract_config = tesseract_config if tesseract_config is not None else """"""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="""dict""", config=tesseract_config)
    words, left, top, width, height = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    """Document image processor: resize, optional Tesseract OCR, RGB→BGR flip.

    The obfuscated source repeated one name for every keyword parameter
    (a SyntaxError) and read attributes from names that were never bound;
    the signature and attribute assignments are restored.
    ``BaseImageProcessor`` is imported at the top of this file.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"""height""": 224, """width""": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["""height"""], size["""width"""])
        # Calls the module-level ``resize`` from image_transforms, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess one or more images; optionally run OCR and attach words/boxes."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""")

        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, """pytesseract""")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"""pixel_values""": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 276 | 0 |
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class A__ ( unittest.TestCase):
A_ : List[str] = MODEL_FOR_MASKED_LM_MAPPING
A_ : Union[str, Any] = TF_MODEL_FOR_MASKED_LM_MAPPING
def __lowerCamelCase ( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='tf' )
__lowerCAmelCase : int = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=6 ) , [
{'sequence': 'My name is grouped', 'score': 2.1E-05, 'token': 3_80_15, 'token_str': ' grouped'},
{'sequence': 'My name is accuser', 'score': 2.1E-05, 'token': 2_55_06, 'token_str': ' accuser'},
] , )
__lowerCAmelCase : Dict = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=6 ) , [
{
'sequence': 'The largest city in France is grouped',
'score': 2.1E-05,
'token': 3_80_15,
'token_str': ' grouped',
},
{
'sequence': 'The largest city in France is accuser',
'score': 2.1E-05,
'token': 2_55_06,
'token_str': ' accuser',
},
] , )
__lowerCAmelCase : Optional[Any] = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=6 ) , [
{'sequence': 'My name is Clara', 'score': 2E-05, 'token': 1_36_06, 'token_str': ' Clara'},
{'sequence': 'My name is Patrick', 'score': 2E-05, 'token': 34_99, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 1.9E-05, 'token': 29_41, 'token_str': ' Te'},
] , )
@require_torch
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='pt' )
__lowerCAmelCase : List[Any] = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=6 ) , [
{'sequence': 'My name is Maul', 'score': 2.2E-05, 'token': 3_56_76, 'token_str': ' Maul'},
{'sequence': 'My name isELS', 'score': 2.2E-05, 'token': 1_64_16, 'token_str': 'ELS'},
] , )
__lowerCAmelCase : int = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=6 ) , [
{
'sequence': 'The largest city in France is Maul',
'score': 2.2E-05,
'token': 3_56_76,
'token_str': ' Maul',
},
{'sequence': 'The largest city in France isELS', 'score': 2.2E-05, 'token': 1_64_16, 'token_str': 'ELS'},
] , )
__lowerCAmelCase : Union[str, Any] = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=6 ) , [
{'sequence': 'My name is Patrick', 'score': 2.1E-05, 'token': 34_99, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 2E-05, 'token': 29_41, 'token_str': ' Te'},
{'sequence': 'My name is Clara', 'score': 2E-05, 'token': 1_36_06, 'token_str': ' Clara'},
] , )
__lowerCAmelCase : str = unmasker('My name is <mask> <mask>' , top_k=2 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=6 ) , [
[
{
'score': 2.2E-05,
'token': 3_56_76,
'token_str': ' Maul',
'sequence': '<s>My name is Maul<mask></s>',
},
{'score': 2.2E-05, 'token': 1_64_16, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'},
],
[
{
'score': 2.2E-05,
'token': 3_56_76,
'token_str': ' Maul',
'sequence': '<s>My name is<mask> Maul</s>',
},
{'score': 2.2E-05, 'token': 1_64_16, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'},
],
] , )
@require_torch_gpu
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = pipeline('fill-mask' , model='hf-internal-testing/tiny-random-distilbert' , device=0 , framework='pt' )
# convert model to fp16
pipe.model.half()
__lowerCAmelCase : Optional[int] = pipe('Paris is the [MASK] of France.' )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
@require_torch
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='pt' )
self.run_large_test(_SCREAMING_SNAKE_CASE )
@slow
@require_tf
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='tf' )
self.run_large_test(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) , [
{'sequence': 'My name is John', 'score': 0.008, 'token': 6_10, 'token_str': ' John'},
{'sequence': 'My name is Chris', 'score': 0.007, 'token': 15_73, 'token_str': ' Chris'},
] , )
__lowerCAmelCase : Optional[int] = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) , [
{
'sequence': 'The largest city in France is Paris',
'score': 0.251,
'token': 22_01,
'token_str': ' Paris',
},
{
'sequence': 'The largest city in France is Lyon',
'score': 0.214,
'token': 1_27_90,
'token_str': ' Lyon',
},
] , )
__lowerCAmelCase : Dict = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) , [
{'sequence': 'My name is Patrick', 'score': 0.005, 'token': 34_99, 'token_str': ' Patrick'},
{'sequence': 'My name is Clara', 'score': 0.000, 'token': 1_36_06, 'token_str': ' Clara'},
{'sequence': 'My name is Te', 'score': 0.000, 'token': 29_41, 'token_str': ' Te'},
] , )
@require_torch
def test_model_no_pad_pt(self):
    """Run the generic pipeline contract test with a tokenizer that has no
    padding token (PyTorch backend, tiny checkpoint)."""
    unmasker = pipeline(task='fill-mask', model='sshleifer/tiny-distilroberta-base', framework='pt')
    # NOTE(review): the original assigned None to two throwaway mangled
    # names; per the surrounding intent this should disable padding on the
    # pipeline's tokenizer — confirm against the upstream test.
    unmasker.tokenizer.pad_token_id = None
    unmasker.tokenizer.pad_token = None
    self.run_pipeline_test(unmasker, [])
@require_tf
def test_model_no_pad_tf(self):
    """TensorFlow counterpart of :meth:`test_model_no_pad_pt`: exercise the
    pipeline with padding disabled on the tokenizer."""
    unmasker = pipeline(task='fill-mask', model='sshleifer/tiny-distilroberta-base', framework='tf')
    # NOTE(review): restored from two mangled None-assignments — confirm
    # these are the intended tokenizer attributes.
    unmasker.tokenizer.pad_token_id = None
    unmasker.tokenizer.pad_token = None
    self.run_pipeline_test(unmasker, [])
def get_test_pipeline(self, model, tokenizer, processor):
    """Build a FillMaskPipeline plus example inputs for the shared pipeline
    test harness.

    Returns:
        (fill_masker, examples) — the pipeline under test and a list of
        input strings containing the tokenizer's mask token.

    NOTE(review): the original declared three parameters with one duplicated
    mangled name (a SyntaxError); names restored from how the body uses them.
    """
    if tokenizer is None or tokenizer.mask_token_id is None:
        # skipTest raises unittest.SkipTest, so execution stops here.
        self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)')
    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
    examples = [
        f"This is another {tokenizer.mask_token} test",
    ]
    return fill_masker, examples
def run_pipeline_test(self, fill_masker, examples):
    """Generic fill-mask contract test.

    Checks the output structure for a single string, a one-element batch and
    a two-element batch, rejects invalid inputs, then delegates to the
    specialised sub-tests (top_k / targets / duplicates / multiple masks).

    NOTE(review): duplicated/mangled parameter names and undefined locals
    restored; the repeated literal result dicts are built by a small helper.
    `ANY(...)` type arguments and the `ValueError` expectations are restored
    placeholders — confirm against the upstream transformers test.
    """
    tokenizer = fill_masker.tokenizer
    model = fill_masker.model

    def _any_results(count):
        # One structural matcher per prediction; the default top_k is 5.
        return [
            {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)}
            for _ in range(count)
        ]

    outputs = fill_masker(f"This is a {tokenizer.mask_token}")
    self.assertEqual(outputs, _any_results(5))

    outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
    self.assertEqual(outputs, _any_results(5))

    outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
    self.assertEqual(outputs, [_any_results(5), _any_results(5)])

    with self.assertRaises(ValueError):
        fill_masker([None])
    # No mask_token is not supported
    with self.assertRaises(ValueError):
        fill_masker('This is')

    self.run_test_top_k(model, tokenizer)
    self.run_test_targets(model, tokenizer)
    self.run_test_top_k_targets(model, tokenizer)
    self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
    self.fill_mask_with_multiple_masks(model, tokenizer)
def run_test_targets(self, model, tokenizer):
    """`targets=` must restrict predictions identically whether supplied at
    pipeline-construction time or at call time, and invalid targets raise.

    NOTE(review): parameter and local bindings restored from the mangled
    original; `ValueError` expectations are the upstream values — confirm.
    """
    vocab = tokenizer.get_vocab()
    targets = sorted(vocab.keys())[:2]
    expected = [
        {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)}
        for _ in targets
    ]

    # Pipeline argument
    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
    outputs = fill_masker(f"This is a {tokenizer.mask_token}")
    self.assertEqual(outputs, expected)
    target_ids = {vocab[el] for el in targets}
    self.assertEqual({el['token'] for el in outputs}, target_ids)
    processed_targets = [tokenizer.decode([x]) for x in target_ids]
    self.assertEqual({el['token_str'] for el in outputs}, set(processed_targets))

    # Call argument
    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
    outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
    self.assertEqual(outputs, expected)
    target_ids = {vocab[el] for el in targets}
    self.assertEqual({el['token'] for el in outputs}, target_ids)
    processed_targets = [tokenizer.decode([x]) for x in target_ids]
    self.assertEqual({el['token_str'] for el in outputs}, set(processed_targets))

    # Score equivalence
    outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
    tokens = [top_mask['token_str'] for top_mask in outputs]
    scores = [top_mask['score'] for top_mask in outputs]
    # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
    if set(tokens) == set(targets):
        unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
        target_scores = [top_mask['score'] for top_mask in unmasked_targets]
        self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

    # Raises with invalid
    with self.assertRaises(ValueError):
        fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
    # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
    if "" not in tokenizer.get_vocab():
        with self.assertRaises(ValueError):
            fill_masker(f"This is a {tokenizer.mask_token}", targets=[''])
    with self.assertRaises(ValueError):
        fill_masker(f"This is a {tokenizer.mask_token}", targets='')
def run_test_top_k(self, model, tokenizer):
    """`top_k` given at construction time and at call time must produce
    identical results.

    NOTE(review): the original's final assertion compared a variable with
    itself (vacuously true after name-mangling); it now compares the two
    independent runs, which is the point of the test.
    """
    expected = [
        {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)}
        for _ in range(2)
    ]

    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
    outputs = fill_masker(f"This is a {tokenizer.mask_token}")
    self.assertEqual(outputs, expected)

    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
    outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
    self.assertEqual(outputs2, expected)

    self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
def run_test_top_k_targets(self, model, tokenizer):
    """Combining `top_k` with `targets` must filter consistently.

    NOTE(review): fixes the original sort key ``lambda _SCREAMING_SNAKE_CASE:
    x["score"]`` whose parameter name did not match its body (NameError at
    call time); bindings restored throughout.
    """
    vocab = tokenizer.get_vocab()
    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

    # top_k=2, ntargets=3
    targets = sorted(vocab.keys())[:3]
    outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

    # If we use the most probably targets, and filter differently, we should still
    # have the same results
    targets2 = [el['token_str'] for el in sorted(outputs, key=lambda x: x['score'], reverse=True)]
    # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
    if set(targets2).issubset(vocab):
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
        # They should yield exactly the same result
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
    """Duplicate entries in `targets` must be deduplicated: even with a large
    `top_k`, at most one prediction per distinct target is returned."""
    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
    vocab = tokenizer.get_vocab()
    # String duplicates + id duplicates
    targets = sorted(vocab.keys())[:3]
    targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
    outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)
    # The target list contains duplicates, so we can't output more
    # than them
    self.assertEqual(len(outputs), 3)
def fill_mask_with_multiple_masks(self, model, tokenizer):
    """A sentence with three mask tokens yields one top_k-sized result list
    per mask (a list of three two-element lists here)."""
    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
    outputs = fill_masker(
        f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
    )
    per_mask = [
        {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)}
        for _ in range(2)
    ]
    # One inner list of `top_k` predictions per mask token in the input.
    self.assertEqual(outputs, [per_mask, per_mask, per_mask])
"""Fetch the current Hacker News top stories via the Firebase API and render
them as a markdown list of links."""
from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    """Return the JSON payload for a single story id."""
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    # timeout added so an unresponsive API cannot hang the caller forever
    return requests.get(url, timeout=10).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Return metadata dicts for the first ``max_stories`` top stories."""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url, timeout=10).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    """Render the top stories as ``* [title](url)`` markdown bullet lines.

    NOTE(review): the original defined all three functions under one mangled
    name (each shadowing the previous) while the call sites used these real
    names, and formatted ``**max_stories`` instead of each story dict.
    """
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class snake_case_ ( __A ,unittest.TestCase ):
    """Test suite for RoCBertTokenizer: full tokenization, the basic and
    wordpiece sub-tokenizers, character classifiers, offset mapping, Chinese
    character handling, and special-token sequence building.

    NOTE(review): this class has been machine-mangled — every call-site
    argument appears as the undefined placeholder ``lowercase_`` and locals
    as ``lowercase__``; the five ``__A`` class attributes below also share one
    name (the last assignment wins). The intended bindings must be confirmed
    against the upstream transformers test before any behavioral change.
    """
    # Mangled class-attribute names; presumably tokenizer_class /
    # rust_tokenizer_class / test flags / from_pretrained filter — verify.
    __A : int = RoCBertTokenizer
    __A : List[str] = None
    __A : Dict = False
    __A : Optional[int] = True
    __A : List[Any] = filter_non_english
    # setUp: writes a tiny vocabulary plus word-shape / word-pronunciation
    # JSON maps into a temp dir so tokenizers can be built from files.
    def __UpperCamelCase ( self : Dict ) -> Any:
        super().setUp()
        lowercase__ : Dict = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        lowercase__ : List[str] = {}
        lowercase__ : List[str] = {}
        for i, value in enumerate(lowercase_ ):
            lowercase__ : Union[str, Any] = i
            lowercase__ : Tuple = i
        lowercase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] )
        lowercase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        # NOTE(review): which dict goes to which writer is ambiguous after
        # mangling — presumably shape map then pronunciation map; confirm.
        with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer:
            json.dump(lowercase_ , lowercase_ , ensure_ascii=lowercase_ )
        with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer:
            json.dump(lowercase_ , lowercase_ , ensure_ascii=lowercase_ )
    # Full tokenizer: Chinese text splits per character; token / shape /
    # pronunciation id lookups all agree on the same ids.
    def __UpperCamelCase ( self : Dict ) -> List[str]:
        lowercase__ : str = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        lowercase__ : Any = tokenizer.tokenize("你好[SEP]你是谁" )
        self.assertListEqual(lowercase_ , ["你", "好", "[SEP]", "你", "是", "谁"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(lowercase_ ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(lowercase_ ) , [5, 6, 2, 5, 7, 8] )
    # Basic tokenizer default: CJK characters become standalone tokens.
    def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
        lowercase__ : List[str] = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
    # The next six tests cover do_lower_case / strip_accents combinations.
    def __UpperCamelCase ( self : List[str] ) -> Dict:
        lowercase__ : Optional[int] = RoCBertBasicTokenizer(do_lower_case=lowercase_ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  " ) , ["hello", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
    def __UpperCamelCase ( self : List[str] ) -> Tuple:
        lowercase__ : int = RoCBertBasicTokenizer(do_lower_case=lowercase_ , strip_accents=lowercase_ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  " ) , ["hällo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
    def __UpperCamelCase ( self : Dict ) -> List[str]:
        lowercase__ : Any = RoCBertBasicTokenizer(do_lower_case=lowercase_ , strip_accents=lowercase_ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  " ) , ["hallo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
    def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
        lowercase__ : Any = RoCBertBasicTokenizer(do_lower_case=lowercase_ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  " ) , ["hallo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
    def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
        lowercase__ : str = RoCBertBasicTokenizer(do_lower_case=lowercase_ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
    def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
        lowercase__ : Optional[int] = RoCBertBasicTokenizer(do_lower_case=lowercase_ , strip_accents=lowercase_ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
    def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
        lowercase__ : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=lowercase_ , strip_accents=lowercase_ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
    # never_split keeps listed special tokens intact.
    def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
        lowercase__ : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=lowercase_ , never_split=["[UNK]"] )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
    # Wordpiece tokenizer: greedy longest-match with "##" continuations,
    # falling back to [UNK] for out-of-vocabulary pieces.
    def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
        lowercase__ : Optional[int] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        lowercase__ : Union[str, Any] = {}
        for i, token in enumerate(lowercase_ ):
            lowercase__ : Optional[Any] = i
        lowercase__ : Dict = RoCBertWordpieceTokenizer(vocab=lowercase_ , unk_token="[UNK]" )
        self.assertListEqual(tokenizer.tokenize("" ) , [] )
        self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
        self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
    # Character-classifier helpers used by the basic tokenizer.
    def __UpperCamelCase ( self : str ) -> Tuple:
        self.assertTrue(_is_whitespace(" " ) )
        self.assertTrue(_is_whitespace("\t" ) )
        self.assertTrue(_is_whitespace("\r" ) )
        self.assertTrue(_is_whitespace("\n" ) )
        self.assertTrue(_is_whitespace("\u00A0" ) )
        self.assertFalse(_is_whitespace("A" ) )
        self.assertFalse(_is_whitespace("-" ) )
    def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
        self.assertTrue(_is_control("\u0005" ) )
        self.assertFalse(_is_control("A" ) )
        self.assertFalse(_is_control(" " ) )
        self.assertFalse(_is_control("\t" ) )
        self.assertFalse(_is_control("\r" ) )
    def __UpperCamelCase ( self : Dict ) -> int:
        self.assertTrue(_is_punctuation("-" ) )
        self.assertTrue(_is_punctuation("$" ) )
        self.assertTrue(_is_punctuation("`" ) )
        self.assertTrue(_is_punctuation("." ) )
        self.assertFalse(_is_punctuation("A" ) )
        self.assertFalse(_is_punctuation(" " ) )
    # Text cleaning: soft-hyphen-only input tokenizes to nothing.
    def __UpperCamelCase ( self : Dict ) -> Any:
        lowercase__ : int = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(lowercase_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
        if self.test_rust_tokenizer:
            lowercase__ : Optional[Any] = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(lowercase_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
    # Offset mapping from the fast tokenizer must align with the produced
    # tokens, both cased and lower-cased.
    def __UpperCamelCase ( self : List[str] ) -> int:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowercase__ : Dict = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
                lowercase__ : List[Any] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
                lowercase__ : str = tokenizer_r.encode_plus(
                    lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ , )
                lowercase__ : Any = tokenizer_r.do_lower_case if hasattr(lowercase_ , "do_lower_case" ) else False
                lowercase__ : Any = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
                self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
    # tokenize_chinese_chars toggle: on => no "##" prefixes on CJK tokens;
    # off => continuation characters gain "##" prefixes.
    def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
        lowercase__ : Optional[Any] = ["的", "人", "有"]
        lowercase__ : Optional[Any] = "".join(lowercase_ )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowercase__ : Optional[Any] = True
                lowercase__ : str = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
                lowercase__ : Any = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
                lowercase__ : Optional[int] = tokenizer_p.encode(lowercase_ , add_special_tokens=lowercase_ )
                lowercase__ : Optional[int] = tokenizer_r.encode(lowercase_ , add_special_tokens=lowercase_ )
                lowercase__ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(lowercase_ )
                lowercase__ : List[Any] = tokenizer_p.convert_ids_to_tokens(lowercase_ )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(lowercase_ , lowercase_ )
                self.assertListEqual(lowercase_ , lowercase_ )
                lowercase__ : int = False
                lowercase__ : Any = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
                lowercase__ : Union[str, Any] = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
                lowercase__ : Dict = tokenizer_r.encode(lowercase_ , add_special_tokens=lowercase_ )
                lowercase__ : List[str] = tokenizer_p.encode(lowercase_ , add_special_tokens=lowercase_ )
                lowercase__ : List[Any] = tokenizer_r.convert_ids_to_tokens(lowercase_ )
                lowercase__ : str = tokenizer_p.convert_ids_to_tokens(lowercase_ )
                # it is expected that only the first Chinese character is not preceded by "##".
                lowercase__ : Any = [
                    F'''##{token}''' if idx != 0 else token for idx, token in enumerate(lowercase_ )
                ]
                self.assertListEqual(lowercase_ , lowercase_ )
                self.assertListEqual(lowercase_ , lowercase_ )
    # Sequence builders: [CLS] x [SEP] and [CLS] x [SEP] y [SEP] layouts
    # (ids 1 and 2 per the tiny vocab written in setUp).
    @slow
    def __UpperCamelCase ( self : Tuple ) -> int:
        lowercase__ : Optional[int] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        lowercase__ : Any = tokenizer.encode("你好" , add_special_tokens=lowercase_ )
        lowercase__ : Dict = tokenizer.encode("你是谁" , add_special_tokens=lowercase_ )
        lowercase__ : str = tokenizer.build_inputs_with_special_tokens(lowercase_ )
        lowercase__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(lowercase_ , lowercase_ )
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_a + [2]
    # prepare_for_model with explicit shape/pronunciation ids must equal the
    # one-shot encode_plus output.
    def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
        lowercase__ : List[str] = self.get_tokenizers(do_lower_case=lowercase_ )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                lowercase__ : int = "你好,你是谁"
                lowercase__ : int = tokenizer.tokenize(lowercase_ )
                lowercase__ : str = tokenizer.convert_tokens_to_ids(lowercase_ )
                lowercase__ : Tuple = tokenizer.convert_tokens_to_shape_ids(lowercase_ )
                lowercase__ : int = tokenizer.convert_tokens_to_pronunciation_ids(lowercase_ )
                lowercase__ : int = tokenizer.prepare_for_model(
                    lowercase_ , lowercase_ , lowercase_ , add_special_tokens=lowercase_ )
                lowercase__ : Any = tokenizer.encode_plus(lowercase_ , add_special_tokens=lowercase_ )
                self.assertEqual(lowercase_ , lowercase_ )
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of the VQ model's encode path.

    NOTE(review): renamed from the mangled duplicate ``A__`` — the decode
    helper at the bottom of this file already constructs ``VQEncoderOutput``,
    and two module-level classes cannot share one name without the first
    being shadowed. Base restored to the imported ``BaseOutput``.
    """

    # Encoded latents produced by the encoder + quant_conv, prior to
    # quantization; presumably shaped (batch, channels, h, w) — confirm.
    latents: torch.FloatTensor
class A__(ModelMixin, ConfigMixin):
    """VQ-VAE style model: encodes images to latents, quantizes them against
    a learned codebook, and decodes back to image space.

    NOTE(review): the original was heavily name-mangled — duplicate
    parameter names (a SyntaxError), the nonexistent ``nn.Convad``, undefined
    locals and shadowed method names. Restored per the diffusers ``VQModel``
    layout; bases restored to the imported ``ModelMixin``/``ConfigMixin``.
    """

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ) -> None:
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        # 1x1 convs project in/out of the codebook embedding space.
        # (``nn.Convad`` in the original is not a torch symbol.)
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(
            num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False
        )
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        """Encode ``x`` into (unquantized) latents.

        Returns a plain tuple ``(h,)`` when ``return_dict`` is False.
        """
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        """Decode latents ``h`` to an image, quantizing first unless
        ``force_not_quantize`` is set."""
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        # "spatial" norm decoders additionally condition on the quantized map.
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(
        self, sample: torch.FloatTensor, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        """Full round trip: encode ``sample``, then decode the latents."""
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
def a__(A_: int = 10**9) -> int:
    """Project Euler 94: sum the perimeters of all almost-equilateral
    Heronian triangles (sides a, a, a±1 with integral area) whose perimeter
    does not exceed ``A_``.

    The side lengths follow a Pell-like recurrence, so each qualifying
    perimeter (16, 50, 196, 722, ...) is generated directly.

    Args:
        A_: inclusive perimeter bound (default one billion).

    Returns:
        Sum of all qualifying perimeters not exceeding the bound.
    """
    max_perimeter = A_  # readable alias; the original body read this
    # undefined name while the parameter had a mangled one.
    prev_value = 1
    value = 2
    i = 0
    perimeter = 0
    perimeters_sum = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        # Advance the recurrence generating the next triangle in the family.
        prev_value += 2 * value
        value += prev_value
        # Alternate between the (a, a, a+1) and (a, a, a-1) branches.
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    # Original printed the undefined name ``solution``; call this function.
    print(f"{a__() = }")
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class A__ :
def __init__(
    self,
    parent,
    batch_size=13,
    seq_length=7,
    is_training=False,
    use_input_mask=True,
    use_token_type_ids=False,
    use_labels=True,
    vocab_size=33,
    hidden_size=32,
    num_hidden_layers=5,
    num_attention_heads=4,
    intermediate_size=37,
    hidden_act="gelu",
    hidden_dropout_prob=0.1,
    attention_probs_dropout_prob=0.1,
    max_position_embeddings=512,
    type_vocab_size=16,
    type_sequence_label_size=2,
    initializer_range=0.02,
    num_labels=3,
    num_choices=4,
    scope=None,
):
    """Model-tester holder for ESM hyper-parameters.

    NOTE(review): the original declared all 22 parameters under one
    duplicated mangled name (a SyntaxError) while the body assigned from
    these real names; parameter names restored in the order the body
    consumes them, defaults kept verbatim.
    """
    self.parent = parent
    self.batch_size = batch_size
    self.seq_length = seq_length
    self.is_training = is_training
    self.use_input_mask = use_input_mask
    self.use_token_type_ids = use_token_type_ids
    self.use_labels = use_labels
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.type_vocab_size = type_vocab_size
    self.type_sequence_label_size = type_sequence_label_size
    self.initializer_range = initializer_range
    self.num_labels = num_labels
    self.num_choices = num_choices
    self.scope = scope
def __UpperCAmelCase ( self :Union[str, Any] ) -> str:
'''simple docstring'''
_a : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a : List[Any] =None
if self.use_input_mask:
_a : Any =random_attention_mask([self.batch_size, self.seq_length] )
_a : Optional[int] =None
_a : str =None
_a : Dict =None
if self.use_labels:
_a : Dict =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a : str =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a : List[str] =ids_tensor([self.batch_size] , self.num_choices )
_a : List[Any] =self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self :str ) -> Optional[int]:
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :int ) -> Tuple:
'''simple docstring'''
_a : Any =EsmModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
_a : Optional[Any] =model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =model(SCREAMING_SNAKE_CASE )
_a : str =model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Dict:
'''simple docstring'''
_a : str =EsmForMaskedLM(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
_a : Union[str, Any] =model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_a : int =self.num_labels
_a : Tuple =EsmForTokenClassification(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
_a : Tuple =model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self :Dict ) -> List[str]:
'''simple docstring'''
_a : Optional[Any] =self.prepare_config_and_inputs()
(
(
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) ,
) : Any =config_and_inputs
_a : List[Any] ={"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline tests for the small ESM checkpoints.

    Restored from mangled source: the base classes were undefined placeholders
    (the mixins are imported at the top of the file) and every test method
    shared a single mangled name, so only one test was actually collected.
    """

    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        """Padding tokens must keep `padding_idx`; real tokens get offset positions."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        """Position ids derived from embeddings are sequential, offset by padding_idx + 1."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("""Esm does not support embedding resizing""")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("""Esm does not support embedding resizing""")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(unittest.TestCase):
    # NOTE(review): the original base class placeholder was undefined; plain
    # `unittest.TestCase` is sufficient for the assertions used here — confirm
    # whether upstream intended `TestCasePlus`.

    @slow
    def test_inference_masked_lm(self):
        """The 8M ESM-2 checkpoint must reproduce known logits on a tiny input."""
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""")
            model.eval()

            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        """The base model's hidden states must match a known reference slice."""
        with torch.no_grad():
            model = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""")
            model.eval()

            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 276 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    """Agent tool producing a binary segmentation mask of an image for a text label.

    Fixes over the mangled source: the base class placeholder was undefined
    (`PipelineTool` is imported above), the class attributes lost their names,
    `encode` passed an undefined value as `padding`, and `decode` dropped the
    thresholding assignments and used `np.uinta` / an undefined `array`.
    """

    description = (
        'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'
        'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
        'describing the elements what should be identified in the segmentation mask. The tool returns the mask.'
    )
    default_checkpoint = 'CIDAS/clipseg-rd64-refined'
    name = 'image_segmenter'
    model_class = CLIPSegForImageSegmentation

    inputs = ['image', 'text']
    outputs = ['image']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['vision'])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        """Tokenize the label and preprocess the image for CLIPSeg."""
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors='pt')

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        """Binarize the logits (<= 0 -> background, > 0 -> label) and return a PIL mask."""
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
| 89 |
'''simple docstring'''
from math import isqrt
def is_prime(number: int) -> bool:
    """Return True if `number` is prime (trial division up to isqrt(number)).

    Numbers below 2 are explicitly non-prime (the bare `all(...)` form would
    wrongly report 0 and 1 as prime).
    """
    if number < 2:
        return False
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below `max_prime` of the form (n+1)^3 - n^3.

    Consecutive cube differences are 7, 19, 37, 61, ...; each step adds
    6 * cube_index to the previous candidate.
    """
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
| 276 | 0 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
    raise Exception("requires gluonnlp == 0.8.3")

if version.parse(mx.__version__) != version.parse("1.5.0"):
    raise Exception("requires mxnet == 1.5.0")

logging.set_verbosity_info()
# Fix: logger and the sample sentence were both assigned to the same mangled
# name, so the logger was immediately clobbered by the string.
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """Convert the original Bort checkpoint (MXNet/Gluonnlp) to a HuggingFace BERT model.

    Loads the Gluonnlp checkpoint at `bort_checkpoint_path`, copies every
    parameter into a freshly built `BertForMaskedLM`, saves it under
    `pytorch_dump_folder_path`, and sanity-checks both models on a sample
    sentence.

    Fixes over the mangled source: the two parameters shared one name (a
    SyntaxError), and every `check_and_map_params(...)` result was discarded
    instead of being assigned back to the HF parameter it was checked against.
    """
    # Hyper-parameters of the released bort_4_8_768_1024 checkpoint.
    bort_4_8_768_1024_hparams = {
        'attention_cell': 'multi_head',
        'num_layers': 4,
        'units': 1024,
        'hidden_size': 768,
        'max_length': 512,
        'num_heads': 8,
        'scaled': True,
        'dropout': 0.1,
        'use_residual': True,
        'embed_size': 1024,
        'embed_dropout': 0.1,
        'word_embed': None,
        'layer_norm_eps': 1E-5,
        'token_type_vocab_size': 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args['attention_cell'],
        num_layers=predefined_args['num_layers'],
        units=predefined_args['units'],
        hidden_size=predefined_args['hidden_size'],
        max_length=predefined_args['max_length'],
        num_heads=predefined_args['num_heads'],
        scaled=predefined_args['scaled'],
        dropout=predefined_args['dropout'],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args['use_residual'],
        activation=predefined_args.get('activation', 'gelu'),
        layer_norm_eps=predefined_args.get('layer_norm_eps', None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = 'openwebtext_ccnews_stories_books_cased'

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), 'models')
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args['units'],
        embed_size=predefined_args['embed_size'],
        embed_dropout=predefined_args['embed_dropout'],
        word_embed=predefined_args['word_embed'],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args['token_type_vocab_size'],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        'architectures': ['BertForMaskedLM'],
        'attention_probs_dropout_prob': predefined_args['dropout'],
        'hidden_act': 'gelu',
        'hidden_dropout_prob': predefined_args['dropout'],
        'hidden_size': predefined_args['embed_size'],
        'initializer_range': 0.02,
        'intermediate_size': predefined_args['hidden_size'],
        'layer_norm_eps': predefined_args['layer_norm_eps'],
        'max_position_embeddings': predefined_args['max_length'],
        'model_type': 'bort',
        'num_attention_heads': predefined_args['num_heads'],
        'num_hidden_layers': predefined_args['num_layers'],
        'pad_token_id': 1,  # 2 = BERT, 1 = RoBERTa
        'type_vocab_size': 1,  # 2 = BERT, 1 = RoBERTa
        'vocab_size': len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()

    # Parameter mapping table (Gluonnlp to Transformers)
    # * denotes layer index
    #
    # | Gluon Parameter                                                | Transformers Parameter
    # | -------------------------------------------------------------- | ----------------------
    # | `encoder.layer_norm.beta`                                      | `bert.embeddings.LayerNorm.bias`
    # | `encoder.layer_norm.gamma`                                     | `bert.embeddings.LayerNorm.weight`
    # | `encoder.position_weight`                                      | `bert.embeddings.position_embeddings.weight`
    # | `word_embed.0.weight`                                          | `bert.embeddings.word_embeddings.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.bias`     | `bert.encoder.layer.*.attention.self.key.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.weight`   | `bert.encoder.layer.*.attention.self.key.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.bias`   | `bert.encoder.layer.*.attention.self.query.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.bias`   | `bert.encoder.layer.*.attention.self.value.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
    # | `encoder.transformer_cells.*.proj.bias`                        | `bert.encoder.layer.*.attention.output.dense.bias`
    # | `encoder.transformer_cells.*.proj.weight`                      | `bert.encoder.layer.*.attention.output.dense.weight`
    # | `encoder.transformer_cells.*.layer_norm.beta`                  | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.layer_norm.gamma`                 | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_1.bias`                   | `bert.encoder.layer.*.intermediate.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_1.weight`                 | `bert.encoder.layer.*.intermediate.dense.weight`
    # | `encoder.transformer_cells.*.ffn.layer_norm.beta`              | `bert.encoder.layer.*.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.ffn.layer_norm.gamma`             | `bert.encoder.layer.*.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_2.bias`                   | `bert.encoder.layer.*.output.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_2.weight`                 | `bert.encoder.layer.*.output.dense.weight`

    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, 'word_embed.0.weight'
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, 'encoder.position_weight'
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, 'encoder.layer_norm.beta'
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, 'encoder.layer_norm.gamma'
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias"""
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight"""
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias"""
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight"""
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias"""
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight"""
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"""encoder.transformer_cells.{i}.proj.bias"""
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"""encoder.transformer_cells.{i}.proj.weight"""
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"""encoder.transformer_cells.{i}.layer_norm.beta"""
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"""encoder.transformer_cells.{i}.layer_norm.gamma"""
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias"""
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight"""
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias"""
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight"""
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta"""
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma"""
        )

    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained('roberta-base')

    # Defined locally so this function does not depend on a module-level constant.
    sample_text = "The Nymphenburg Palace is a beautiful palace in Munich!"
    input_ids = tokenizer.encode_plus(sample_text)['input_ids']

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(sample_text, return_tensors='pt')
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1E-3)

    if success:
        print('✔️ Both model do output the same tensors')
    else:
        print('❌ Both model do **NOT** output the same tensors')
        print('Absolute difference is:', max_absolute_diff)
if __name__ == "__main__":
    # Fix: the parser and parsed args were assigned to the same mangled name
    # (`__A`) while the code below read `parser` / `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 90 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
# Fire a deprecation warning at import time: these classes now live in
# `diffusers.pipelines.controlnet`; this module only re-exports them.
deprecate(
    '''stable diffusion controlnet''',
    '''0.22.0''',
    '''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
    standard_warn=False,
    stacklevel=3,
)
| 276 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
# Import the real UnCLIP pipelines only when torch and a recent enough
# transformers are available; otherwise fall back to the dummy objects, which
# raise an informative error as soon as they are instantiated.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 91 |
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        # Fix: the comprehension referenced an undefined mangled name; each of
        # the 10 jobs gets exactly one shard.
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    """`_distribute_shards` splits num_shards into at most max_num_jobs contiguous ranges."""
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    """`_split_gen_kwargs` splits list-valued kwargs across at most max_num_jobs dicts."""
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        # Two list-valued kwargs of different lengths are ambiguous -> error.
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    """`_number_of_shards_in_gen_kwargs` returns the shard count or raises on conflicts."""
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 276 | 0 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def _a ( SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : set , SCREAMING_SNAKE_CASE_ : set , SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : PriorityQueue , SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : float | int , ):
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
__lowerCAmelCase = cst_fwd.get(SCREAMING_SNAKE_CASE_ , np.inf )
__lowerCAmelCase = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
__lowerCAmelCase = new_cost_f
__lowerCAmelCase = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
__lowerCAmelCase = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def _a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : dict ):
__lowerCAmelCase = -1
__lowerCAmelCase = set()
__lowerCAmelCase = set()
__lowerCAmelCase = {source: 0}
__lowerCAmelCase = {destination: 0}
__lowerCAmelCase = {source: None}
__lowerCAmelCase = {destination: None}
__lowerCAmelCase = PriorityQueue()
__lowerCAmelCase = PriorityQueue()
__lowerCAmelCase = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
__lowerCAmelCase , __lowerCAmelCase = queue_forward.get()
visited_forward.add(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase , __lowerCAmelCase = queue_backward.get()
visited_backward.add(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = pass_and_relaxation(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
__lowerCAmelCase = pass_and_relaxation(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
__lowerCAmelCase = shortest_distance
return shortest_path_distance
# Fix: both example graphs were assigned to the same mangled name, so the
# forward graph was immediately overwritten by the backward one.
graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 92 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Fix: the logger and this map were both assigned to `A__`, colliding with the
# config class defined below.
ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class RoCBertConfig(PretrainedConfig):
    """Configuration for RoCBert models.

    Restored from mangled source: the base class placeholder was undefined
    (`PretrainedConfig` is imported above) and every constructor parameter was
    renamed to a placeholder while the body read the real names.
    """

    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        # RoCBert-specific: auxiliary pronunciation / glyph-shape embeddings.
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 276 | 0 |
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    """Fetch one Hacker News item as a dict via the Firebase API."""
    url = F'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Fetch the current top `max_stories` Hacker News stories."""
    url = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'''
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    """Render the top stories as a markdown bullet list of links."""
    stories = hackernews_top_stories(max_stories)
    return "\n".join('''* [{title}]({url})'''.format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
| 93 |
'''simple docstring'''
class Graph:
    """Undirected weighted graph stored as a symmetric adjacency mapping.

    `adjacency[u][v] == adjacency[v][u]` holds the weight of edge (u, v).
    Method and class names are restored to the identifiers the rest of the
    module actually calls (`Graph`, `get_edges`, `get_vertices`, `build`, ...).
    """

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Register `vertex` if it is not already present."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Add the undirected edge (head, tail) with the given weight; self-loops are ignored."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Make every edge weight unique (Boruvka's algorithm requires distinct weights)."""
        edges = self.get_edges()
        # drop the mirrored duplicate of each undirected edge
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        # bump any weight that ties or inverts the sorted order
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        # write the adjusted weights back into the adjacency mapping
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Return every directed representation (tail, head, weight) of the edges."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Build a graph from iterables of vertices and (head, tail, weight) edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
class UnionFind:
    """Disjoint-set (union-find) with path compression and union by rank."""

    def __init__(self):
        self.parent = {}
        self.rank = {}

    def __len__(self):
        return len(self.parent)

    def make_set(self, item):
        """Create a singleton set for `item` (idempotent)."""
        if item in self.parent:
            return self.find(item)

        self.parent[item] = item
        self.rank[item] = 0
        return item

    def find(self, item):
        """Return the representative of `item`'s set, compressing the path."""
        if item not in self.parent:
            return self.make_set(item)

        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item])
        return self.parent[item]

    def union(self, item1, item2):
        """Merge the sets of the two items by rank; return the new root."""
        root1 = self.find(item1)
        root2 = self.find(item2)

        if root1 == root2:
            return root1

        if self.rank[root1] > self.rank[root2]:
            self.parent[root2] = root1
            return root1

        if self.rank[root1] < self.rank[root2]:
            self.parent[root1] = root2
            return root2

        if self.rank[root1] == self.rank[root2]:
            self.rank[root1] += 1
            self.parent[root2] = root1
            return root1
        return None

    @staticmethod
    def boruvka_mst(graph):
        """Compute a minimum spanning tree of `graph` (the module's Graph class,
        which must have distinct edge weights) with Boruvka's algorithm."""
        num_components = graph.num_vertices

        union_find = UnionFind()
        mst_edges = []
        while num_components > 1:
            # cheapest outgoing edge of each current component (-1 == none yet)
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            # drop the mirrored duplicate of each undirected edge
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
| 276 | 0 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SpeechToImagePipeline(DiffusionPipeline):
    """
    Pipeline that generates images from speech: the audio is first transcribed
    with a Whisper model, then the transcription is used as the prompt of a
    standard Stable Diffusion denoising loop.

    The original obfuscated class inherited from itself and declared every
    parameter with the same name; identifiers are restored from the visible
    structure of the body.
    """

    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNetaDConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size="auto"):
        """Compute attention in `slice_size` chunks to reduce peak memory."""
        if slice_size == "auto":
            # halve the attention head dim as a reasonable default slice
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """Restore full (unsliced) attention computation."""
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Transcribe `audio` and run Stable Diffusion generation on the text."""
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=48_0000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
| 94 |
'''simple docstring'''
from datetime import datetime

import requests
from bs4 import BeautifulSoup  # fixed: module is `bs4`, not `bsa`

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    # timestamped file name so repeated downloads don't overwrite each other
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
| 276 | 0 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCAmelCase : Dict = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    r"""
    Image processor for super-resolution models: optionally rescales pixel
    values and pads images so height and width are multiples of `pad_size`.

    Restored names: the obfuscated class inherited from an undefined base and
    gave every method one shared name, while `preprocess` calls
    `self.rescale` / `self.pad`.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale `image` by `scale` (e.g. 1/255 to map uint8 pixels to [0, 1])."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        """Symmetrically pad the bottom/right so both dimensions become multiples of `size`."""
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess one image or a batch; per-call args override the instance defaults."""
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 95 |
'''simple docstring'''
# First cell injected into every generated doc notebook: installs Transformers
# (comments are in Italian — this is the Italian documentation build).
INSTALL_CONTENT = '''
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
'''

# Cells prepended to every converted notebook.
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]

# Doc-example placeholders that the `black` formatter must leave untouched.
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 276 | 0 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for PerceiverTokenizer, a byte-level tokenizer without a fixed vocab.

    Identifiers restored: the obfuscated class inherited from an undefined
    name, every method shared the name `A_`, and the framework constant was
    renamed, making the suite un-runnable.
    """

    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver')

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r'^[ a-zA-Z]+$', t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + ' '
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = 'Unicode €.'
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded['input_ids'], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, '[CLS]Unicode €.[SEP]')

        encoded = tokenizer('e è é ê ë')
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded['input_ids'], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, '[CLS]e è é ê ë[SEP]')

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë')), '[CLS]e è é ê ë[SEP]')

    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('input_ids', batch)
        self.assertIn('attention_mask', batch)
        self.assertNotIn('decoder_input_ids', batch)
        self.assertNotIn('decoder_attention_mask', batch)

    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding='max_length', truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets['input_ids'].shape[1])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}'''):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}'''):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = ' He is very happy, UNwant\u00E9d,running'
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}'''):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = ' He is very happy, UNwant\u00E9d,running'
                tokenizer.add_tokens(['bim', 'bambam'])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append('new_additional_special_token')
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), encoding='utf-8') as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), encoding='utf-8') as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [F'''<extra_id_{i}>''' for i in range(125)]

                special_tokens_map['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                tokenizer_config['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]

                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ['an_additional_special_token'],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens)
                self.assertEqual(
                    ['a_new_additional_special_token'],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'])
                    ),
                )

    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), '�')

    # NOTE(review): the four no-op overrides below disable common tests that do not
    # apply to a vocab-less byte tokenizer; their exact names are reconstructed — confirm.
    def test_pretrained_model_lists(self):
        pass

    def test_get_vocab(self):
        pass

    def test_pretokenized_inputs(self):
        pass

    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}'''):
                tokens = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
'''simple docstring'''
# Doomsday anchors per month for leap / non-leap years.
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Return the weekday name of a Gregorian date using the Doomsday algorithm.

    >>> get_week_day(2020, 10, 24)
    'Saturday'
    >>> get_week_day(2000, 1, 1)
    'Saturday'
    """
    # minimal input check:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # A year is NOT leap when not divisible by 4, or when it is a century year
    # not divisible by 400. (Fixed: the previous condition used `% 400 == 0`,
    # misclassifying years like 2000 as non-leap.)
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 276 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class lowercase ( A__ ):
"""simple docstring"""
_a = 'vivit'
def __init__( self , UpperCamelCase_=224 , UpperCamelCase_=32 , UpperCamelCase_=[2, 16, 16] , UpperCamelCase_=3 , UpperCamelCase_=768 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=3072 , UpperCamelCase_="gelu_fast" , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.02 , UpperCamelCase_=1e-06 , UpperCamelCase_=True , **UpperCamelCase_ , ):
'''simple docstring'''
UpperCamelCase__ :Tuple = hidden_size
UpperCamelCase__ :int = num_hidden_layers
UpperCamelCase__ :Optional[Any] = num_attention_heads
UpperCamelCase__ :List[str] = intermediate_size
UpperCamelCase__ :Dict = hidden_act
UpperCamelCase__ :str = hidden_dropout_prob
UpperCamelCase__ :Optional[Any] = attention_probs_dropout_prob
UpperCamelCase__ :Optional[int] = initializer_range
UpperCamelCase__ :int = layer_norm_eps
UpperCamelCase__ :List[Any] = image_size
UpperCamelCase__ :Optional[Any] = num_frames
UpperCamelCase__ :Dict = tubelet_size
UpperCamelCase__ :Optional[int] = num_channels
UpperCamelCase__ :List[Any] = qkv_bias
super().__init__(**UpperCamelCase_ ) | 97 |
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class A__ ( TypedDict ):
    """Typed result of the Burrows-Wheeler transform.

    ``bwt_string`` is the transformed text (the last column of the sorted
    rotation matrix); ``idx_original_string`` is the row index of the
    original string, required to invert the transform.

    The original declared both fields under one duplicated name and
    subclassed an undefined ``UpperCAmelCase__``; the imported ``TypedDict``
    base and the key names used by the transform below are restored.
    """

    bwt_string: str
    idx_original_string: int
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ) -> list[str]:
    """Return all cyclic rotations of the input string.

    Rotation ``i`` is ``s[i:] + s[:i]``; an empty string yields ``[]``.

    Raises:
        TypeError: if the argument is not a ``str``.
    """
    # The original compared the value against itself (`isinstance(s, s)`),
    # which raises TypeError for every input; the second argument must be
    # the type `str`.
    if not isinstance(_UpperCAmelCase, str):
        raise TypeError("""The parameter s type must be str.""")
    return [_UpperCAmelCase[i:] + _UpperCAmelCase[:i] for i in range(len(_UpperCAmelCase))]
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ) -> BWTTransformDict:
    """Apply the Burrows-Wheeler transform to the input string.

    Returns a dict with ``bwt_string`` (last column of the sorted rotation
    matrix) and ``idx_original_string`` (row index of the original string).

    Raises:
        TypeError: if the argument is not a ``str``.
        ValueError: if the argument is empty.
    """
    if not isinstance(_UpperCAmelCase, str):
        raise TypeError("""The parameter s type must be str.""")
    if not _UpperCAmelCase:
        raise ValueError("""The parameter s must not be empty.""")
    # Build and sort every cyclic rotation. Inlined here because the sibling
    # helper's module-level name is re-used by later definitions in this file,
    # so calling it by name would resolve to the wrong function.
    rotations = sorted(_UpperCAmelCase[i:] + _UpperCAmelCase[:i] for i in range(len(_UpperCAmelCase)))
    response: BWTTransformDict = {
        "bwt_string": "".join(word[-1] for word in rotations),
        "idx_original_string": rotations.index(_UpperCAmelCase),
    }
    return response
def SCREAMING_SNAKE_CASE_ ( bwt_string : str , idx_original_string : int ) -> str:
    """Invert the Burrows-Wheeler transform.

    Repeatedly prepends the BWT column to a sorted table of partial
    rotations; after ``len(bwt_string)`` passes the table holds all sorted
    rotations, and the row at ``idx_original_string`` is the original text.

    The original declared both parameters with the same name (a
    SyntaxError); names are restored from the error messages below.

    Raises:
        TypeError: if ``bwt_string`` is not a ``str`` or the index cannot
            be cast to ``int``.
        ValueError: if ``bwt_string`` is empty or the index is out of range.
    """
    if not isinstance(bwt_string, str):
        raise TypeError("""The parameter bwt_string type must be str.""")
    if not bwt_string:
        raise ValueError("""The parameter bwt_string must not be empty.""")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            """The parameter idx_original_string type must be int or passive"""
            """ of cast to int.""" )
    if idx_original_string < 0:
        raise ValueError("""The parameter idx_original_string must not be lower than 0.""")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            """The parameter idx_original_string must be lower than""" """ len(bwt_string).""" )
    ordered_rotations = [""""""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    # Interactive demo: read a string, print its BWT, then invert it.
    # NOTE(review): `bwt_transform`, `reverse_bwt`, `entry_msg`, `s`,
    # `result` and `original_string` are not bound under these names in this
    # file (the helpers above all share one module-level name and the results
    # are assigned to `A__`) -- confirm the intended bindings before running.
    A__: Any = '''Provide a string that I will generate its BWT transform: '''
    A__: Union[str, Any] = input(entry_msg).strip()
    A__: Optional[int] = bwt_transform(s)
    print(
        F"Burrows Wheeler transform for string '{s}' results "
        F"in '{result['bwt_string']}'"
    )
    A__: Union[str, Any] = reverse_bwt(result['''bwt_string'''], result['''idx_original_string'''])
    print(
        F"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        F"we get original string '{original_string}'"
    )
| 276 | 0 |
"""simple docstring"""
from __future__ import annotations
import queue
class snake_case :
    """A binary-tree node holding an integer payload.

    Attributes:
        data:  value stored at this node.
        left:  left child (``snake_case`` or ``None``).
        right: right child (``snake_case`` or ``None``).
    """

    def __init__( self , lowerCamelCase__ : int ) -> None:
        """Create a leaf node holding *lowerCamelCase__* as its value."""
        # The original assigned the undefined name `data` to a throwaway
        # variable; the attribute names restored here are the ones every
        # traversal below reads (`.data`, `.left`, `.right`).
        self.data = lowerCamelCase__
        self.left = None
        self.right = None
def a_ ( ):
    """Interactively build a binary tree breadth-first from stdin.

    Prompts for the root value, then for each dequeued node asks for its
    left and right children; entering "n" (or nothing) stops input and
    returns the root.

    Returns:
        snake_case: root of the constructed tree.
    """
    print('\n********Press N to stop entering at any point of time********\n' )
    check = input('Enter the value of the root node: ' ).strip().lower()
    q = queue.Queue()
    tree_node = snake_case(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f'''Enter the left node of {node_found.data}: '''
        check = input(msg).strip().lower() or 'n'
        if check == "n":
            return tree_node
        left_node = snake_case(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f'''Enter the right node of {node_found.data}: '''
        check = input(msg).strip().lower() or 'n'
        if check == "n":
            return tree_node
        right_node = snake_case(int(check))
        node_found.right = right_node
        q.put(right_node)
    # Unreachable in practice (input always terminates via "n" above).
    raise
def a_ ( lowerCamelCase ):
    """Print the pre-order (root, left, right) traversal, comma-separated."""
    if not isinstance(lowerCamelCase, snake_case) or not lowerCamelCase:
        return

    def _walk(node):
        # Inner helper: recursion must not rely on this function's re-used
        # module-level name (`a_` is rebound by every traversal below).
        if node is None:
            return
        print(node.data, end=',')
        _walk(node.left)
        _walk(node.right)

    _walk(lowerCamelCase)
def a_ ( lowerCamelCase ):
    """Print the in-order (left, root, right) traversal, comma-separated."""
    if not isinstance(lowerCamelCase, snake_case) or not lowerCamelCase:
        return

    def _walk(node):
        # Inner helper: recursion must not rely on this function's re-used
        # module-level name.
        if node is None:
            return
        _walk(node.left)
        print(node.data, end=',')
        _walk(node.right)

    _walk(lowerCamelCase)
def a_ ( lowerCamelCase ):
    """Print the post-order (left, right, root) traversal, comma-separated."""
    if not isinstance(lowerCamelCase, snake_case) or not lowerCamelCase:
        return

    def _walk(node):
        # Inner helper: recursion must not rely on this function's re-used
        # module-level name.
        if node is None:
            return
        _walk(node.left)
        _walk(node.right)
        print(node.data, end=',')

    _walk(lowerCamelCase)
def a_ ( lowerCamelCase ):
    """Print a breadth-first (level-order) traversal, comma-separated.

    The original dequeued into a throwaway name and then read the undefined
    `node_dequeued`; the binding is restored.
    """
    if not isinstance(lowerCamelCase, snake_case) or not lowerCamelCase:
        return
    q = queue.Queue()
    q.put(lowerCamelCase)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=',')
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)
def a_ ( lowerCamelCase ):
    """Print the tree level by level, one line per level, comma-separated.

    Drains the queue for the current level while collecting the next
    level's nodes in `list_`, then re-enqueues them.
    """
    if not isinstance(lowerCamelCase, snake_case) or not lowerCamelCase:
        return
    q = queue.Queue()
    q.put(lowerCamelCase)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=',')
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def a_ ( lowerCamelCase ):
    """Iterative pre-order traversal using an explicit stack."""
    if not isinstance(lowerCamelCase, snake_case) or not lowerCamelCase:
        return
    stack = []
    n = lowerCamelCase
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=',')
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def a_ ( lowerCamelCase ):
    """Iterative in-order traversal using an explicit stack."""
    if not isinstance(lowerCamelCase, snake_case) or not lowerCamelCase:
        return
    stack = []
    n = lowerCamelCase
    while n or stack:
        while n:  # descend as far left as possible
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=',')
        n = n.right
def a_ ( lowerCamelCase ):
    """Iterative post-order traversal with two stacks.

    The original named both stacks `stacka`, making the second pass a
    no-op over the first stack; distinct names are restored.
    """
    if not isinstance(lowerCamelCase, snake_case) or not lowerCamelCase:
        return
    stack1, stack2 = [], []
    n = lowerCamelCase
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=',')
def a_ ( s = "" , width = 50 , char = "*" ):
    """Return *s* centered in a banner of *width*, padded with *char*.

    With no text, returns a newline followed by a full-width rule. The
    original declared all three parameters with the same name (a
    SyntaxError); names are restored from the body's references.
    """
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
    # Demo driver: build a tree interactively, then print every traversal.
    # NOTE(review): `prompt`, `build_tree`, `pre_order`, `in_order`,
    # `post_order`, `level_order`, `level_order_actual`, `*_iter`, `node`
    # and `TreeNode` are not bound under these names in this file (all the
    # helpers above share the module-level name `a_` and the node class is
    # `snake_case`) -- confirm the intended bindings before running.
    import doctest
    doctest.testmod()
    print(prompt('Binary Tree Traversals'))
    lowerCAmelCase__ : TreeNode = build_tree()
    print(prompt('Pre Order Traversal'))
    pre_order(node)
    print(prompt() + '\n')
    print(prompt('In Order Traversal'))
    in_order(node)
    print(prompt() + '\n')
    print(prompt('Post Order Traversal'))
    post_order(node)
    print(prompt() + '\n')
    print(prompt('Level Order Traversal'))
    level_order(node)
    print(prompt() + '\n')
    print(prompt('Actual Level Order Traversal'))
    level_order_actual(node)
    print('*' * 50 + '\n')
    print(prompt('Pre Order Traversal - Iteration Version'))
    pre_order_iter(node)
    print(prompt() + '\n')
    print(prompt('In Order Traversal - Iteration Version'))
    in_order_iter(node)
    print(prompt() + '\n')
    print(prompt('Post Order Traversal - Iteration Version'))
    post_order_iter(node)
    print(prompt())
| 98 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy-import structure: maps submodule name -> public names it defines.
# The original built this dict under a throwaway name and then referenced
# the undefined `_import_structure` at the bottom; the binding is restored,
# and the vision/torch-dependent entries are added to it instead of being
# assigned to discarded temporaries.
_import_structure = {
    'configuration_chinese_clip': [
        'CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'ChineseCLIPConfig',
        'ChineseCLIPOnnxConfig',
        'ChineseCLIPTextConfig',
        'ChineseCLIPVisionConfig',
    ],
    'processing_chinese_clip': ['ChineseCLIPProcessor'],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_chinese_clip'] = ['ChineseCLIPFeatureExtractor']
    _import_structure['image_processing_chinese_clip'] = ['ChineseCLIPImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_chinese_clip'] = [
        'CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ChineseCLIPModel',
        'ChineseCLIPPreTrainedModel',
        'ChineseCLIPTextModel',
        'ChineseCLIPVisionModel',
    ]

if TYPE_CHECKING:
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy; `sys` was imported (and
    # otherwise unused) for exactly this assignment.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 276 | 0 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
# (dataset, config_name) pairs mirrored on the HF GCP bucket and exercised
# by the parameterized test class below. The previous `Optional[int]`
# annotation referenced a name that is never imported here (NameError at
# import time) and did not describe the value; use `list`.
lowercase : list = [
    {"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
    {"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
    {"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
    {"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
    {"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
    {"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
    {"""dataset""": """snli""", """config_name""": """plain_text"""},
    {"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
    {"""dataset""": """wiki40b""", """config_name""": """en"""},
    {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
    {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
    {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
    {"""dataset""": """natural_questions""", """config_name""": """default"""},
]
def A_ ( A__=True ) -> list:
    """Build ``parameterized.named_parameters`` entries for the GCP datasets.

    With ``A__`` (with_config) true, returns one entry per
    (dataset, config) pair; otherwise one entry per distinct dataset name.
    The original tested the undefined name ``with_config`` (the parameter
    was renamed to ``A__``) and its ``List[str]`` return annotation
    referenced a name never imported in this file.
    """
    if A__:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(A_(A__=True))
class A__ ( TestCase ):
    """For each mirrored dataset/config pair, verify that its dataset_info
    JSON can be fetched from the HF GCP bucket.

    The original subclassed the undefined ``__UpperCAmelCase`` (``TestCase``
    is the only test base imported here), duplicated the two class
    attributes under one name, and declared both test parameters with the
    same name (a SyntaxError). ``parameterized`` supplies the values as
    keywords, which fixes the parameter names as ``dataset``/``config_name``.
    """

    dataset = None
    config_name = None

    # NOTE(review): unittest only auto-discovers methods named ``test_*``;
    # this name looks mangled -- confirm the intended ``test_...`` name.
    def __lowercase ( self , dataset , config_name ):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset , cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path , dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir , config_name=config_name , hash=dataset_module.hash , )
            # URL of the dataset-info file mirrored on GCS.
            dataset_info_url = '/'.join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep , '/'),
                    config.DATASET_INFO_FILENAME,
                ])
            dataset_info_path = cached_path(dataset_info_url , cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def A_ ( tmp_path_factory ) -> None:
    """Integration test: prepare `wikipedia` (frr) from the GCS mirror.

    The body referenced the undefined ``tmp_path_factory`` (the parameter
    had been renamed to ``A__``); as a pytest fixture the parameter MUST be
    named ``tmp_path_factory``, so that name is restored. The assignment of
    ``None`` to a throwaway variable is restored to
    ``builder_instance._download_and_prepare`` per the comment below it.
    """
    tmp_dir = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
    dataset_module = dataset_module_factory('wikipedia' , cache_dir=tmp_dir )
    builder_cls = import_main_class(dataset_module.module_path )
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir , config_name='20220301.frr' , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def A_ ( tmp_path ) -> None:
    """Integration test: stream `wikipedia` (frr) and check the result types.

    The original used its single (renamed) parameter as the cache dir --
    restored as the pytest ``tmp_path`` fixture -- and asserted
    ``isinstance(x, x)``; the intended targets are the otherwise-unused
    ``IterableDatasetDict``/``IterableDataset`` imports at the top of the
    file.
    """
    dataset_module = dataset_module_factory('wikipedia' , cache_dir=tmp_path )
    builder_cls = import_main_class(dataset_module.module_path , dataset=True )
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path , config_name='20220301.frr' , hash=dataset_module.hash , )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds , IterableDatasetDict )
    assert "train" in ds
    assert isinstance(ds['train'] , IterableDataset )
    assert next(iter(ds['train'] ) )
| 99 |
'''simple docstring'''
class A__ :
    """A trie (prefix tree) node; an instance with no parent is the root.

    The original defined all four methods under one (mangled) name, so the
    later definitions shadowed the earlier ones; the method names restored
    here are exactly the ones the self-test below calls
    (``insert_many``/``insert``/``find``/``delete``).
    """

    def __init__( self ) -> None:
        # Child nodes keyed by single character.
        self.nodes = {}
        # True when a stored word ends at this node.
        self.is_leaf = False

    def insert_many( self , words ) -> None:
        """Insert every word of *words* into the trie."""
        for word in words:
            self.insert(word)

    def insert( self , word ) -> None:
        """Insert a single word, creating intermediate nodes as needed."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = A__()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find( self , word ) -> bool:
        """Return True iff *word* was previously inserted (exact match)."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete( self , word ) -> None:
        """Remove *word* from the trie, pruning nodes that become empty."""

        def _delete(curr, word, index) -> bool:
            # Returns True when *curr* became childless and may be pruned
            # by its parent.
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
def SCREAMING_SNAKE_CASE_ ( node , word ) -> None:
    """Print (space-separated) every complete word stored below *node*,
    each prefixed with the accumulated *word*.

    The original declared both parameters with the same name (a
    SyntaxError) and recursed through an undefined name; recursion now goes
    through an inner helper so it does not depend on this function's
    (re-used) module-level name.
    """

    def _walk(curr, prefix):
        if curr.is_leaf:
            print(prefix, end=""" """ )
        for key, value in curr.nodes.items():
            _walk(value, prefix + key)

    _walk(node, word)
def SCREAMING_SNAKE_CASE_ ( ) -> bool:
    """Self-test for the trie: insert a word list, then exercise
    find/delete; returns True when every assertion holds.

    ``TrieNode()`` and a stray temporary inside the ``all(...)`` check
    referenced undefined names; the trie class in this file is ``A__``.
    """
    words = """banana bananas bandana band apple all beast""".split()
    root = A__()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("""banana""")
    assert not root.find("""bandanas""")
    assert not root.find("""apps""")
    assert root.find("""apple""")
    assert root.find("""all""")
    root.delete("""all""")
    assert not root.find("""all""")
    root.delete("""banana""")
    assert not root.find("""banana""")
    assert root.find("""bananas""")
    return True
def SCREAMING_SNAKE_CASE_ ( msg , passes ) -> None:
    """Print *msg* followed by a pass/fail tag.

    The original declared both parameters with the same name (a
    SyntaxError) while the body already read ``passes``; distinct names
    are restored.
    """
    print(str(msg), """works!""" if passes else """doesn't work :(""" )
def SCREAMING_SNAKE_CASE_ ( ) -> None:
    # NOTE(review): `test_trie` is not bound in this file -- the trie
    # self-test above also carries the module-level name
    # `SCREAMING_SNAKE_CASE_`; confirm the intended binding.
    assert test_trie()
def SCREAMING_SNAKE_CASE_ ( ) -> None:
    # NOTE(review): neither `print_results` nor `test_trie` is bound in this
    # file (the helpers above share this function's module-level name);
    # confirm the intended bindings.
    print_results("""Testing trie functionality""" ,test_trie() )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined under that name in this file --
    # the entry points above are all named `SCREAMING_SNAKE_CASE_`; confirm
    # the intended binding before running.
    main()
| 276 | 0 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger()
@dataclass
class Tracker:
    """Records the leaf modules executed during one forward pass of
    ``module``.

    The original declared the class under the same name as the next class,
    duplicated its fields under one name, gave the hook three identical
    parameter names (a SyntaxError), and referenced ``nn.Convad`` /
    ``nn.BatchNormad``; the names below are the ones the body itself reads
    (``self.module``/``self.traced``/``self.handles``) and the class name
    the transfer class calls.
    """

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs, outputs):
        # Keep only "leaves" (modules with no submodules); conv/batch-norm
        # layers are kept explicitly as well.
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x):
        """Run *x* through the module while recording executed leaves."""
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    """Copies parameters from ``src`` onto ``dest`` by aligning their
    traced leaf modules one-to-one.

    Field names are the ones the body reads (``self.src``/``self.dest``/
    ``self.src_skip``/``self.dest_skip``/``self.verbose``); the original
    declared all five under a single duplicated name.
    """

    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x):
        """Run *x* through both models and copy weights layer by layer.

        Raises Exception when the two traces differ in length.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(src_traced) != len(dest_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}.")
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name, config, save_directory, push_to_hub=True):
    """Convert one timm ResNet checkpoint into a HF
    ``ResNetForImageClassification`` and optionally push it to the Hub.

    The original declared all four parameters with one duplicated name (a
    SyntaxError) and referenced the undefined ``save_directory``; names are
    restored from the body's own references and the call sites below.

    Args:
        name: timm model name (e.g. ``"resnet50"``).
        config: target ResNet configuration.
        save_directory: local directory used as the Hub repo path.
        push_to_hub: when True, upload model + image processor.
    """
    print(f"Converting {name}..." )
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x)
    # Sanity check: converted model must reproduce the timm logits.
    assert torch.allclose(from_model(x), our_model(x).logits ), "The model logits don't match the original one."
    checkpoint_name = f"resnet{'-'.join(name.split('resnet' ) )}"
    print(checkpoint_name)
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add model""" , use_temp_dir=True , )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add image processor""" , use_temp_dir=True , )
        print(f"Pushed {checkpoint_name}" )
def convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
    """Convert one named ResNet checkpoint, or all supported ones when
    ``model_name`` is None.

    Parameter names/order follow the call at the bottom of the file
    (``convert_weights_and_push(pytorch_dump_folder_path, args.model_name,
    args.push_to_hub)``). Also binds ``config`` in the single-model branch,
    which the original left undefined before ``return``.

    Returns:
        (config, expected_shape) for the last converted model.
    """
    filename = """imagenet-1k-id2label.json"""
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    # Partial config pre-filled with the ImageNet-1k label mapping.
    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
        """resnet18""": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ),
        """resnet26""": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
        """resnet34""": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ),
        """resnet50""": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
        """resnet101""": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
        """resnet152""": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
    }
    if model_name:
        config = names_to_config[model_name]
        convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    # The original bound the parser, parsed args and dump path to one
    # throwaway name and then read `parser`/`args`/`pytorch_dump_folder_path`
    # (all undefined); the bindings are restored from those references.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    # NOTE(review): argparse `type=bool` treats any non-empty string
    # (including "False") as True -- confirm whether a store_true/str2bool
    # flag was intended.
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 100 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


# Lazy-import structure: maps submodule name -> public names it defines.
# The original assigned the dict and the tokenizer list to throwaway names
# and then referenced the undefined `_import_structure` at the bottom.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_gpt_sw3'] = ['GPTSw3Tokenizer']

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # Import target matches the submodule/name registered above.
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    # Replace this module with a lazy proxy; `sys` is imported for exactly
    # this assignment.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 276 | 0 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

# pandas.read_csv parameter names grouped by how they must be handled when
# building the kwargs dict (see the config property below, which references
# these constants by exactly these names; the original assigned all five
# values to one repeatedly-shadowed name, leaving every reference undefined).
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV: mirrors the keyword arguments of
    ``pandas.read_csv``.

    The original declared all 41 fields under one duplicated name (so the
    dataclass kept a single field) and both methods under another; field
    names are recovered one-to-one, in order, from the kwargs dict built in
    ``pd_read_csv_kwargs`` plus the datasets-level ``delimiter``/
    ``column_names``/``features`` options. The class name ``CsvConfig`` is
    the one the property below and the builder's config-class attribute
    reference.
    """

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        # `delimiter`/`column_names` are datasets-level aliases for pandas'
        # `sep`/`names`; fold them in so only one spelling reaches pandas.
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            '''sep''': self.sep,
            '''header''': self.header,
            '''names''': self.names,
            '''index_col''': self.index_col,
            '''usecols''': self.usecols,
            '''prefix''': self.prefix,
            '''mangle_dupe_cols''': self.mangle_dupe_cols,
            '''engine''': self.engine,
            '''converters''': self.converters,
            '''true_values''': self.true_values,
            '''false_values''': self.false_values,
            '''skipinitialspace''': self.skipinitialspace,
            '''skiprows''': self.skiprows,
            '''nrows''': self.nrows,
            '''na_values''': self.na_values,
            '''keep_default_na''': self.keep_default_na,
            '''na_filter''': self.na_filter,
            '''verbose''': self.verbose,
            '''skip_blank_lines''': self.skip_blank_lines,
            '''thousands''': self.thousands,
            '''decimal''': self.decimal,
            '''lineterminator''': self.lineterminator,
            '''quotechar''': self.quotechar,
            '''quoting''': self.quoting,
            '''escapechar''': self.escapechar,
            '''comment''': self.comment,
            '''encoding''': self.encoding,
            '''dialect''': self.dialect,
            '''error_bad_lines''': self.error_bad_lines,
            '''warn_bad_lines''': self.warn_bad_lines,
            '''skipfooter''': self.skipfooter,
            '''doublequote''': self.doublequote,
            '''memory_map''': self.memory_map,
            '''float_precision''': self.float_precision,
            '''chunksize''': self.chunksize,
            '''encoding_errors''': self.encoding_errors,
            '''on_bad_lines''': self.on_bad_lines,
            '''date_format''': self.date_format,
        }
        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        return pd_read_csv_kwargs
class lowercase ( datasets.ArrowBasedBuilder ):
    """ArrowBasedBuilder that reads CSV files with pandas in chunks.

    The original defined all four builder hooks under one duplicated name,
    so only the last survived; the standard ``ArrowBasedBuilder`` hook
    names (``_info``/``_split_generators``/``_generate_tables``) and the
    ``BUILDER_CONFIG_CLASS`` attribute are restored.
    """

    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Resolve ``data_files`` into per-split lists of file iterators."""
        if not self.config.data_files:
            raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}')
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            # A bare list of files means a single TRAIN split.
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'''files''': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'''files''': files}))
        return splits

    def _cast_table(self, pa_table):
        """Cast a pyarrow table to the configured features' schema."""
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield ((file_idx, batch_idx), pa.Table) pairs read via pandas."""
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f'Failed to read file \'{file}\' with error {type(e)}: {e}')
                raise
| 101 |
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
A__: str = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : int ,_UpperCAmelCase : int=None ,_UpperCAmelCase : Optional[Any]=None ) -> Optional[Any]:
# Recurse if needed
if "." in tensor_name:
_a : Union[str, Any] =tensor_name.split(""".""" )
for split in splits[:-1]:
_a : Optional[Any] =getattr(_UpperCAmelCase ,_UpperCAmelCase )
if new_module is None:
raise ValueError(F"{module} has no attribute {split}." )
_a : Optional[int] =new_module
_a : Optional[int] =splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F"{module} does not have a parameter or a buffer named {tensor_name}." )
_a : Optional[Any] =tensor_name in module._buffers
_a : str =getattr(_UpperCAmelCase ,_UpperCAmelCase )
if old_value.device == torch.device("""meta""" ) and device not in ["meta", torch.device("""meta""" )] and value is None:
raise ValueError(F"{tensor_name} is on the meta device, we need a `value` to put in on {device}." )
_a : int =False
_a : Tuple =False
if is_buffer or not is_bitsandbytes_available():
_a : str =False
_a : Optional[Any] =False
else:
_a : int =hasattr(bnb.nn ,"""Params4bit""" ) and isinstance(module._parameters[tensor_name] ,bnb.nn.Paramsabit )
_a : int =isinstance(module._parameters[tensor_name] ,bnb.nn.IntaParams )
if is_abit or is_abit:
_a : Any =module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
_a : int =old_value.to(_UpperCAmelCase )
elif isinstance(_UpperCAmelCase ,torch.Tensor ):
_a : str =value.to("""cpu""" )
if value.dtype == torch.inta:
_a : int =version.parse(importlib.metadata.version("""bitsandbytes""" ) ) > version.parse(
"""0.37.2""" )
if not is_abit_serializable:
raise ValueError(
"""Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. """
"""Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.""" )
else:
_a : Dict =torch.tensor(_UpperCAmelCase ,device="""cpu""" )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls ,_UpperCAmelCase ) and fpaa_statistics is None:
_a : int =new_value.T
_a : Any =old_value.__dict__
if is_abit:
_a : Any =bnb.nn.IntaParams(_UpperCAmelCase ,requires_grad=_UpperCAmelCase ,**_UpperCAmelCase ).to(_UpperCAmelCase )
elif is_abit:
_a : Union[str, Any] =bnb.nn.Paramsabit(_UpperCAmelCase ,requires_grad=_UpperCAmelCase ,**_UpperCAmelCase ).to(_UpperCAmelCase )
_a : List[Any] =new_value
if fpaa_statistics is not None:
setattr(module.weight ,"""SCB""" ,fpaa_statistics.to(_UpperCAmelCase ) )
else:
if value is None:
_a : str =old_value.to(_UpperCAmelCase )
elif isinstance(_UpperCAmelCase ,torch.Tensor ):
_a : Any =value.to(_UpperCAmelCase )
else:
_a : str =torch.tensor(_UpperCAmelCase ,device=_UpperCAmelCase )
if is_buffer:
_a : Optional[int] =new_value
else:
_a : Optional[Any] =nn.Parameter(_UpperCAmelCase ,requires_grad=old_value.requires_grad )
_a : Tuple =new_value
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Tuple ,_UpperCAmelCase : Union[str, Any]=None ,_UpperCAmelCase : List[Any]=None ,_UpperCAmelCase : str=None ,_UpperCAmelCase : Union[str, Any]=False ) -> Dict:
for name, module in model.named_children():
if current_key_name is None:
_a : Optional[int] =[]
current_key_name.append(_UpperCAmelCase )
if (isinstance(_UpperCAmelCase ,nn.Linear ) or isinstance(_UpperCAmelCase ,_UpperCAmelCase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in """.""".join(_UpperCAmelCase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
_a , _a : int =module.weight.shape
else:
_a : List[str] =module.in_features
_a : Tuple =module.out_features
if quantization_config.quantization_method() == "llm_int8":
_a : Optional[Any] =bnb.nn.LinearabitLt(
_UpperCAmelCase ,_UpperCAmelCase ,module.bias is not None ,has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight ,threshold=quantization_config.llm_inta_threshold ,)
_a : Optional[Any] =True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
_a : Dict =bnb.nn.Linearabit(
_UpperCAmelCase ,_UpperCAmelCase ,module.bias is not None ,quantization_config.bnb_abit_compute_dtype ,compress_statistics=quantization_config.bnb_abit_use_double_quant ,quant_type=quantization_config.bnb_abit_quant_type ,)
_a : List[Any] =True
# Store the module class in case we need to transpose the weight later
_a : int =type(_UpperCAmelCase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(_UpperCAmelCase )
if len(list(module.children() ) ) > 0:
_a , _a : List[Any] =_replace_with_bnb_linear(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,has_been_replaced=_UpperCAmelCase ,)
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def SCREAMING_SNAKE_CASE_ ( model ,modules_to_not_convert=None ,current_key_name=None ,quantization_config=None ) -> Tuple:
    """Public entry point: convert `model`'s linear layers to bitsandbytes layers.

    Defaults `modules_to_not_convert` to ["lm_head"] so the output head keeps full
    precision, then delegates to the recursive replacement helper and warns when
    nothing was converted.

    NOTE(review): the original signature reused one parameter name four times
    (a SyntaxError); names restored from the body's references.
    """
    modules_to_not_convert = ["""lm_head"""] if modules_to_not_convert is None else modules_to_not_convert
    # NOTE(review): `_replace_with_bnb_linear` is the recursive helper defined above —
    # confirm its module-level name.
    model, has_been_replaced = _replace_with_bnb_linear(
        model ,modules_to_not_convert ,current_key_name ,quantization_config )
    if not has_been_replaced:
        logger.warning(
            """You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
            """ Please double check your model architecture, or submit an issue on github if you think this is"""
            """ a bug.""" )
    return model
def SCREAMING_SNAKE_CASE_ ( *args ,**kwargs ) -> str:
    """Deprecated shim: forwards to `replace_with_bnb_linear` with a FutureWarning.

    NOTE(review): the original reused one name for both *args and **kwargs (a
    SyntaxError) and had no warning category; FutureWarning matches the upstream API.
    """
    warnings.warn(
        """`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead""" ,FutureWarning ,)
    return replace_with_bnb_linear(*args ,**kwargs )
def SCREAMING_SNAKE_CASE_ ( *args ,**kwargs ) -> Optional[int]:
    """Deprecated shim: forwards to `set_module_quantized_tensor_to_device` with a
    FutureWarning.

    NOTE(review): the original reused one name for both *args and **kwargs (a
    SyntaxError) and had no warning category; FutureWarning matches the upstream API.
    """
    warnings.warn(
        """`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead""" ,FutureWarning ,)
    return set_module_quantized_tensor_to_device(*args ,**kwargs )
def SCREAMING_SNAKE_CASE_ ( model ) -> Union[str, Any]:
    """Return the list of module names that should NOT be quantized: the model's
    last child (typically the output head) plus any tied-weight modules, with
    ".weight"/".bias" suffixes stripped.

    NOTE(review): the original body stored every intermediate into one local and
    then re-read the function parameter; dataflow restored from upstream intent.
    """
    tied_model = deepcopy(model )  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18 (dict return) vs newer (list of groups)
    if isinstance(tied_params ,dict ):
        tied_keys = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params ,[] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model (base models expose no attribute named after their own prefix)
    is_base_model = not hasattr(model ,model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = [""".weight""", """.bias"""]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove ,"""""" )
        filtered_module_names.append(name )
    return filtered_module_names
| 276 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration file.
SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
# Map of canonical checkpoint names to the URLs of their hosted config files.
# NOTE(review): this assignment reuses the logger's (obfuscated) name, shadowing it —
# the two constants presumably had distinct names originally.
SCREAMING_SNAKE_CASE : Optional[int] = {
    """hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _UpperCAmelCase ( PretrainedConfig ):
    """Configuration class storing the architecture hyper-parameters of a YOLOS model.

    NOTE(review): the base class was an undefined name in the original; restored to
    `PretrainedConfig`, which this module imports. The original `__init__` reused one
    parameter name 22 times (a SyntaxError) and dropped the `self.` on every attribute
    store; parameter names restored from the body's own right-hand-side references.
    """

    lowerCamelCase__ = 'yolos'  # presumably the `model_type` identifier — TODO confirm

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],  # NOTE(review): mutable default kept for checkpoint compatibility
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        """Store all architecture and loss hyper-parameters on the config instance."""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class _UpperCAmelCase ( OnnxConfig ):
    """ONNX export configuration for YOLOS.

    NOTE(review): the base class was an undefined name in the original; restored to
    `OnnxConfig`, which this module imports. The three properties all shared one
    obfuscated name (later defs silently shadowed earlier ones); names restored to
    the `OnnxConfig` override API — confirm against transformers.onnx.OnnxConfig.
    """

    # Minimum torch version required for a correct ONNX export.
    torch_onnx_minimum_version = version.parse('1.11' )

    @property
    def inputs(self):
        """Dynamic-axes mapping for the exported model's inputs."""
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )

    @property
    def atol_for_validation(self):
        """Absolute tolerance used when validating the exported model's outputs."""
        return 1E-4

    @property
    def default_onnx_opset(self):
        """ONNX opset version to export with."""
        return 12
| 102 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
A__: int = logging.getLogger(__name__)
@dataclass
class A__ :
    """A single training/test example for token classification.

    NOTE(review): the original declared all three fields under one duplicated name,
    collapsing them to a single field; names restored from the attribute reads
    (`example.guid`, `example.words`, `example.labels`) later in this module.
    """

    guid: str  # unique identifier for the example
    words: List[str]  # whitespace-split tokens of the sentence
    labels: Optional[List[str]]  # per-word labels; None for unlabeled (test) data
@dataclass
class A__ :
    """Model-ready features for one token-classification example.

    NOTE(review): the original declared all four fields under one duplicated name,
    collapsing them to a single field; names restored from the keyword construction
    `InputFeatures(input_ids=..., attention_mask=..., token_type_ids=..., label_ids=...)`
    later in this module.
    """

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None  # None for models without segment ids
    label_ids: Optional[List[int]] = None  # None for unlabeled data
class A__ ( Enum ):
    """Dataset split selector.

    NOTE(review): the base class was an undefined name; restored to `Enum` (imported
    at the top of this module). The three members shared one duplicated name, which
    Enum rejects; names restored from the `Split.train` / `mode.value` usage below.
    """

    train = "train"
    dev = "dev"
    test = "test"
class A__ :
    """Base class for a token-classification (NER-style) task.

    Subclasses provide the example reader and label list; the feature conversion
    is shared. NOTE(review): the original gave all three static methods one
    duplicated name and reused a single parameter name throughout
    `convert_examples_to_features` (a SyntaxError); names restored from the call
    sites (`read_examples_from_file`, `convert_examples_to_features`) and the
    body's own references (`max_seq_length`, `sep_token`, ...).
    """

    @staticmethod
    def read_examples_from_file(data_dir ,mode: Union[Split, str] ) -> List[InputExample]:
        """Read the `InputExample`s of split `mode` from files under `data_dir`."""
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str ) -> List[str]:
        """Return the list of labels used by this task."""
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        """Tokenize each example word-by-word and build fixed-length feature vectors.

        `cls_token_at_end` selects the CLS position: False (BERT) -> [CLS] A [SEP];
        True (XLNet) -> A [SEP] [CLS]. Only the first sub-token of each word keeps
        the real label id; the rest get `pad_token_label_id`.
        """
        label_map = {label: i for i, label in enumerate(label_list )}
        features = []
        for ex_index, example in enumerate(examples ):
            if ex_index % 1_0_0_0_0 == 0:
                logger.info("""Writing example %d of %d""" ,ex_index ,len(examples ) )
            tokens = []
            label_ids = []
            for word, label in zip(example.words ,example.labels ):
                word_tokens = tokenizer.tokenize(word )
                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens ) > 0:
                    tokens.extend(word_tokens )
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens ) - 1) )
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens ) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            # "type_ids" distinguish the first and second sequence; [CLS] is used as
            # the "sentence vector" for classification (only meaningful when fine-tuned).
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens )
            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids
            input_ids = tokenizer.convert_tokens_to_ids(tokens )
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids )
            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids )
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids ) == max_seq_length
            assert len(input_mask ) == max_seq_length
            assert len(segment_ids ) == max_seq_length
            assert len(label_ids ) == max_seq_length
            if ex_index < 5:
                logger.info("""*** Example ***""" )
                logger.info("""guid: %s""" ,example.guid )
                logger.info("""tokens: %s""" ,""" """.join([str(x ) for x in tokens] ) )
                logger.info("""input_ids: %s""" ,""" """.join([str(x ) for x in input_ids] ) )
                logger.info("""input_mask: %s""" ,""" """.join([str(x ) for x in input_mask] ) )
                logger.info("""segment_ids: %s""" ,""" """.join([str(x ) for x in segment_ids] ) )
                logger.info("""label_ids: %s""" ,""" """.join([str(x ) for x in label_ids] ) )
            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None
            features.append(
                InputFeatures(
                    input_ids=input_ids ,attention_mask=input_mask ,token_type_ids=segment_ids ,label_ids=label_ids ) )
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class A__ ( Dataset ):
    """PyTorch dataset of token-classification features, cached on disk.

    NOTE(review): the base class was an undefined name; restored to `Dataset`
    (imported from torch.utils.data above). The original `__init__` reused one
    parameter name eight times (a SyntaxError) and never assigned
    `self.features`; names restored from the body's own references.
    """

    features: List[InputFeatures]
    # CrossEntropyLoss's ignore_index marks sub-token/padding positions to skip in the loss.
    pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

    def __init__(
        self,
        token_classification_task: TokenClassificationTask,
        data_dir: str,
        tokenizer: PreTrainedTokenizer,
        labels: List[str],
        model_type: str,
        max_seq_length: Optional[int] = None,
        overwrite_cache=False,
        mode: Split = Split.train,
    ) -> None:
        """Load features from the on-disk cache, or build and cache them."""
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            data_dir ,"""cached_{}_{}_{}""".format(mode.value ,tokenizer.__class__.__name__ ,str(max_seq_length ) ) ,)
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + """.lock"""
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not overwrite_cache:
                logger.info(f"Loading features from cached file {cached_features_file}" )
                self.features = torch.load(cached_features_file )
            else:
                logger.info(f"Creating features from dataset file at {data_dir}" )
                examples = token_classification_task.read_examples_from_file(data_dir ,mode )
                # TODO clean up all this to leverage built-in features of tokenizers
                self.features = token_classification_task.convert_examples_to_features(
                    examples ,labels ,max_seq_length ,tokenizer ,cls_token_at_end=bool(model_type in ["""xlnet"""] ) ,cls_token=tokenizer.cls_token ,cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 ,sep_token=tokenizer.sep_token ,sep_token_extra=False ,pad_on_left=bool(tokenizer.padding_side == """left""" ) ,pad_token=tokenizer.pad_token_id ,pad_token_segment_id=tokenizer.pad_token_type_id ,pad_token_label_id=self.pad_token_label_id ,)
                logger.info(f"Saving features into cached file {cached_features_file}" )
                torch.save(self.features ,cached_features_file )

    def __len__(self ) -> int:
        return len(self.features )

    def __getitem__(self ,i: int ) -> InputFeatures:
        return self.features[i]
if is_tf_available():
import tensorflow as tf
class A__ :
    """TensorFlow dataset of token-classification features, backed by a generator.

    NOTE(review): the original `__init__` reused one parameter name eight times
    (a SyntaxError), never assigned `self.features`/`self.dataset`, and used the
    non-existent dtype `tf.intaa`; restored to tf.int32 inputs / tf.int64 labels —
    confirm against the training script that consumes this dataset.
    """

    features: List[InputFeatures]
    pad_token_label_id: int = -100  # positions with this label are ignored by the loss

    def __init__(
        self,
        token_classification_task: TokenClassificationTask,
        data_dir: str,
        tokenizer: PreTrainedTokenizer,
        labels: List[str],
        model_type: str,
        max_seq_length: Optional[int] = None,
        overwrite_cache=False,
        mode: Split = Split.train,
    ) -> None:
        """Build features eagerly, then wrap them in a `tf.data.Dataset` generator."""
        examples = token_classification_task.read_examples_from_file(data_dir ,mode )
        # TODO clean up all this to leverage built-in features of tokenizers
        self.features = token_classification_task.convert_examples_to_features(
            examples ,labels ,max_seq_length ,tokenizer ,cls_token_at_end=bool(model_type in ["""xlnet"""] ) ,cls_token=tokenizer.cls_token ,cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 ,sep_token=tokenizer.sep_token ,sep_token_extra=False ,pad_on_left=bool(tokenizer.padding_side == """left""" ) ,pad_token=tokenizer.pad_token_id ,pad_token_segment_id=tokenizer.pad_token_type_id ,pad_token_label_id=self.pad_token_label_id ,)

        def gen():
            # Yield (inputs-dict, labels) pairs; include token_type_ids only when present.
            for ex in self.features:
                if ex.token_type_ids is None:
                    yield (
                        {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                        ex.label_ids,
                    )
                else:
                    yield (
                        {
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label_ids,
                    )

        if "token_type_ids" not in tokenizer.model_input_names:
            self.dataset = tf.data.Dataset.from_generator(
                gen ,({"""input_ids""": tf.int32, """attention_mask""": tf.int32}, tf.int64) ,(
                    {"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )},
                    tf.TensorShape([None] ),
                ) ,)
        else:
            self.dataset = tf.data.Dataset.from_generator(
                gen ,({"""input_ids""": tf.int32, """attention_mask""": tf.int32, """token_type_ids""": tf.int32}, tf.int64) ,(
                    {
                        """input_ids""": tf.TensorShape([None] ),
                        """attention_mask""": tf.TensorShape([None] ),
                        """token_type_ids""": tf.TensorShape([None] ),
                    },
                    tf.TensorShape([None] ),
                ) ,)

    def get_dataset(self ):
        """Attach the known cardinality and return the tf.data.Dataset."""
        self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
        return self.dataset

    def __len__(self ) -> int:
        return len(self.features )

    def __getitem__(self ,i: int ) -> InputFeatures:
        return self.features[i]
| 276 | 0 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( PipelineTesterMixin ,IFPipelineTesterMixin ,unittest.TestCase ):
    """Fast (CPU-capable) tests for the base IF text-to-image pipeline.

    NOTE(review): the original listed one undefined name twice as base classes
    (a duplicate-base TypeError); restored to the two mixins this file imports.
    Class attributes and test-method names collapsed under duplicated obfuscated
    names; restored to the PipelineTesterMixin API — confirm against diffusers.
    """

    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self ,device ,seed=0):
        """Build a minimal deterministic call payload for the pipeline."""
        if str(device).startswith('''mps'''):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != '''cuda''' ,reason='''float16 requires CUDA''')
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2 ,)

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,)
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
    """Slow GPU integration tests chaining every IF pipeline variant
    (text-to-image, img2img, inpainting; stage I + stage II each).

    NOTE(review): the original methods reused one parameter name four times
    (a SyntaxError) and passed undefined names as kwargs; names restored from
    the keyword usage in the bodies. `UpperCamelCase()` is this module's
    CUDA-memory-reset helper (defined below the class).
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # text-to-image: stage I + stage II super-resolution
        pipe_1 = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' ,variant='''fp16''' ,torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            '''DeepFloyd/IF-II-L-v1.0''' ,variant='''fp16''' ,torch_dtype=torch.float16 ,text_encoder=None ,tokenizer=None)
        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to('''cuda''')
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt('''anime turtle''' ,device='''cuda''')
        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()
        pipe_1.tokenizer = None
        pipe_1.text_encoder = None
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1 ,pipe_2 ,prompt_embeds ,negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # img2img
        pipe_1 = IFImgaImgPipeline(**pipe_1.components)
        pipe_2 = IFImgaImgSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_img2img(pipe_1 ,pipe_2 ,prompt_embeds ,negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1 ,pipe_2 ,prompt_embeds ,negative_prompt_embeds)

    def _test_if(self ,pipe_1 ,pipe_2 ,prompt_embeds ,negative_prompt_embeds):
        # pipeline 1
        UpperCamelCase()  # reset CUDA peak-memory stats (module helper below)
        generator = torch.Generator(device='''cpu''').manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds ,negative_prompt_embeds=negative_prompt_embeds ,num_inference_steps=2 ,generator=generator ,output_type='''np''' ,)
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''')
        assert_mean_pixel_difference(image ,expected_image)
        # pipeline 2
        UpperCamelCase()
        generator = torch.Generator(device='''cpu''').manual_seed(0)
        image = floats_tensor((1, 3, 64, 64) ,rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds ,negative_prompt_embeds=negative_prompt_embeds ,image=image ,generator=generator ,num_inference_steps=2 ,output_type='''np''' ,)
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''')
        assert_mean_pixel_difference(image ,expected_image)

    def _test_if_img2img(self ,pipe_1 ,pipe_2 ,prompt_embeds ,negative_prompt_embeds):
        # pipeline 1
        UpperCamelCase()
        image = floats_tensor((1, 3, 64, 64) ,rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device='''cpu''').manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds ,negative_prompt_embeds=negative_prompt_embeds ,image=image ,num_inference_steps=2 ,generator=generator ,output_type='''np''' ,)
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''')
        assert_mean_pixel_difference(image ,expected_image)
        # pipeline 2
        UpperCamelCase()
        generator = torch.Generator(device='''cpu''').manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256) ,rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64) ,rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds ,negative_prompt_embeds=negative_prompt_embeds ,image=image ,original_image=original_image ,generator=generator ,num_inference_steps=2 ,output_type='''np''' ,)
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''')
        assert_mean_pixel_difference(image ,expected_image)

    def _test_if_inpainting(self ,pipe_1 ,pipe_2 ,prompt_embeds ,negative_prompt_embeds):
        # pipeline 1
        UpperCamelCase()
        image = floats_tensor((1, 3, 64, 64) ,rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64) ,rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device='''cpu''').manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds ,negative_prompt_embeds=negative_prompt_embeds ,image=image ,mask_image=mask_image ,num_inference_steps=2 ,generator=generator ,output_type='''np''' ,)
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''')
        assert_mean_pixel_difference(image ,expected_image)
        # pipeline 2
        UpperCamelCase()
        generator = torch.Generator(device='''cpu''').manual_seed(0)
        image = floats_tensor((1, 3, 64, 64) ,rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256) ,rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256) ,rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds ,negative_prompt_embeds=negative_prompt_embeds ,image=image ,mask_image=mask_image ,original_image=original_image ,generator=generator ,num_inference_steps=2 ,output_type='''np''' ,)
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''')
        assert_mean_pixel_difference(image ,expected_image)
def UpperCamelCase( ):
    """Reset CUDA memory bookkeeping so a fresh peak-memory measurement can start."""
    cuda = torch.cuda
    cuda.empty_cache()
    cuda.reset_max_memory_allocated()
    cuda.reset_peak_memory_stats()
| 103 |
'''simple docstring'''
from __future__ import annotations
class A__ :
    """Boyer–Moore pattern search using only the bad-character heuristic.

    NOTE(review): the original `__init__` assigned both attribute pairs to a
    throwaway local instead of `self.*`, and all three methods shared one
    duplicated name; method names restored from the calls inside
    `bad_character_heuristic` and the driver script below.
    """

    def __init__(self ,text: str ,pattern: str ) -> None:
        """Store the text/pattern and cache their lengths."""
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text ), len(pattern )

    def match_in_pattern(self ,char: str ) -> int:
        """Return the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1 ,-1 ,-1 ):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self ,current_pos: int ) -> int:
        """With the pattern aligned at `current_pos`, return the text index of the
        rightmost mismatching character, or -1 on a full match."""
        for i in range(self.patLen - 1 ,-1 ,-1 ):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self ) -> list[int]:
        """Return every alignment index where the pattern fully matches the text."""
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1 ):
            mismatch_index = self.mismatch_in_text(i )
            if mismatch_index == -1:
                positions.append(i )
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index] )
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition] — rebinding is a no-op in a for loop
        return positions
# Demo driver for the Boyer–Moore search class defined above.
# NOTE(review): the original rebound the class name to the demo strings and then
# called an undefined `BoyerMooreSearch(text, pattern)`; restored to use the
# class and distinct variable names.
text = '''ABAABA'''
pattern = '''AB'''
bms = A__(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
    print('''No match found''')
else:
    print('''Pattern found in following positions: ''')
    print(positions)
| 276 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class lowercase_ (TaskTemplate ):
    """Task template describing an automatic-speech-recognition dataset
    (an audio input column mapped to a string transcription column).

    NOTE(review): `frozen=` and the `isinstance` type were undefined names in the
    original, and all five fields shared one duplicated name; field names restored
    from the body's own `self.audio_column` / `self.input_schema` /
    `self.transcription_column` references.
    """

    task: str = field(default='automatic-speech-recognition' ,metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema: ClassVar[Features] = Features({'audio': Audio()} )
    label_schema: ClassVar[Features] = Features({'transcription': Value('string' )} )
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self ,features ):
        """Return a copy of this template whose input schema uses the dataset's
        actual Audio feature for the configured audio column.

        Raises:
            ValueError: if the audio column is missing or not an Audio feature.
        """
        if self.audio_column not in features:
            raise ValueError(F"Column {self.audio_column} is not present in features." )
        if not isinstance(features[self.audio_column] ,Audio ):
            raise ValueError(F"Column {self.audio_column} is not an Audio type." )
        task_template = copy.deepcopy(self )
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # bypass the frozen dataclass to install the aligned schema on the copy
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self ):
        """Map dataset column names to the template's canonical column names."""
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 104 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'''
)
# Fallback when the fast tokenizer import above failed — presumably the
# `LlamaTokenizerFast = None` sentinel; TODO confirm against the import block.
A__: Dict = None
# Intermediate (feed-forward) layer widths per LLaMA model size; these match the
# values produced by the intermediate-size helper below (e.g. 7B/dim-4096 -> 11008).
A__: Tuple = {
    '''7B''': 1_1008,
    '''13B''': 1_3824,
    '''30B''': 1_7920,
    '''65B''': 2_2016,
    '''70B''': 2_8672,
}
# Number of checkpoint shards ("consolidated.XX.pth" files) per model size.
A__: Any = {
    '''7B''': 1,
    '''7Bf''': 1,
    '''13B''': 2,
    '''13Bf''': 2,
    '''30B''': 4,
    '''65B''': 8,
    '''70B''': 8,
    '''70Bf''': 8,
}
def SCREAMING_SNAKE_CASE_ ( n ,ffn_dim_multiplier=1 ,multiple_of=256 ) -> Dict:
    """Compute the LLaMA feed-forward (intermediate) size for hidden dim `n`,
    rounded up to a multiple of `multiple_of` (e.g. n=4096 -> 11008).

    NOTE(review): the original reused one parameter name three times (a
    SyntaxError); names restored from the body's own references.
    """
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
def SCREAMING_SNAKE_CASE_ ( path ) -> List[str]:
    """Load and return the JSON content of the file at `path`.

    Fix: the original passed the path string to `json.load` instead of the
    open file handle.
    """
    with open(path ,"""r""" ) as f:
        return json.load(f )
def SCREAMING_SNAKE_CASE_ ( text ,path ) -> Tuple:
    """Serialize `text` as JSON into the file at `path`.

    NOTE(review): the original reused one parameter name for both arguments
    (a SyntaxError) and passed the same object to `json.dump` twice.
    """
    with open(path ,"""w""" ) as f:
        json.dump(text ,f )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : List[Any]=True ) -> Union[str, Any]:
    """Convert a raw LLaMA checkpoint directory into the Transformers layout.

    NOTE(review): this block looks machine-mangled — every local is assigned to
    `_a` and the signature repeats `_UpperCAmelCase` (a SyntaxError as written),
    yet the body reads names such as `params`, `model_size`, `n_layers`,
    `loaded`, `state_dict` and `inv_freq` that the mangling destroyed.  The
    parameters are presumably (model_path, input_base_path, model_size,
    safe_serialization).  The comments below describe the apparent intent;
    confirm against the upstream conversion script before relying on this code.
    """
    os.makedirs(_UpperCAmelCase ,exist_ok=_UpperCAmelCase )
    # Shards are first written to a scratch "tmp" directory, then reloaded below.
    _a : Union[str, Any] =os.path.join(_UpperCAmelCase ,"""tmp""" )
    os.makedirs(_UpperCAmelCase ,exist_ok=_UpperCAmelCase )
    # Original checkpoint hyper-parameters live in params.json next to the weights.
    _a : int =read_json(os.path.join(_UpperCAmelCase ,"""params.json""" ) )
    _a : int =NUM_SHARDS[model_size]
    _a : Dict =params["""n_layers"""]
    _a : Union[str, Any] =params["""n_heads"""]
    _a : List[str] =n_heads // num_shards
    _a : int =params["""dim"""]
    _a : Union[str, Any] =dim // n_heads
    _a : int =1_0_0_0_0.0
    # Rotary-embedding inverse frequencies: 1 / base^(2i/d) per even index i.
    _a : str =1.0 / (base ** (torch.arange(0 ,_UpperCAmelCase ,2 ).float() / dims_per_head))
    if "n_kv_heads" in params:
        _a : str =params["""n_kv_heads"""] # for GQA / MQA
        _a : Optional[Any] =n_heads_per_shard // num_key_value_heads
        _a : Optional[int] =dim // num_key_value_heads
    else: # compatibility with other checkpoints
        _a : str =n_heads
        _a : Any =n_heads_per_shard
        _a : str =dim
    # permute for sliced rotary
    def permute(_UpperCAmelCase : Tuple ,_UpperCAmelCase : Optional[int]=n_heads ,_UpperCAmelCase : Optional[int]=dim ,_UpperCAmelCase : List[str]=dim ):
        # Reorders q/k projection rows so HF's rotary implementation matches the
        # original interleaved layout.  NOTE(review): `w` and `dima` are unbound here.
        return w.view(_UpperCAmelCase ,dima // n_heads // 2 ,2 ,_UpperCAmelCase ).transpose(1 ,2 ).reshape(_UpperCAmelCase ,_UpperCAmelCase )
    print(F"Fetching all parameters from the checkpoint at {input_base_path}." )
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        _a : Any =torch.load(os.path.join(_UpperCAmelCase ,"""consolidated.00.pth""" ) ,map_location="""cpu""" )
    else:
        # Sharded
        _a : List[Any] =[
            torch.load(os.path.join(_UpperCAmelCase ,F"consolidated.{i:02d}.pth" ) ,map_location="""cpu""" )
            for i in range(_UpperCAmelCase )
        ]
    # Running parameter count (for the index file) and the weight_map index itself.
    _a : Any =0
    _a : Optional[int] ={"""weight_map""": {}}
    for layer_i in range(_UpperCAmelCase ):
        # One output shard file per transformer layer.
        _a : List[str] =F"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            _a : List[str] ={
                F"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[F"layers.{layer_i}.attention.wq.weight"] ),
                F"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[F"layers.{layer_i}.attention.wk.weight"] ),
                F"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[F"layers.{layer_i}.attention.wv.weight"],
                F"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[F"layers.{layer_i}.attention.wo.weight"],
                F"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[F"layers.{layer_i}.feed_forward.w1.weight"],
                F"model.layers.{layer_i}.mlp.down_proj.weight": loaded[F"layers.{layer_i}.feed_forward.w2.weight"],
                F"model.layers.{layer_i}.mlp.up_proj.weight": loaded[F"layers.{layer_i}.feed_forward.w3.weight"],
                F"model.layers.{layer_i}.input_layernorm.weight": loaded[F"layers.{layer_i}.attention_norm.weight"],
                F"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[F"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            _a : Tuple ={
                F"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    F"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                F"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    F"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            # q/k projections: concatenate per-shard slices, then apply the rotary permutation.
            _a : str =permute(
                torch.cat(
                    [
                        loaded[i][F"layers.{layer_i}.attention.wq.weight"].view(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
                        for i in range(_UpperCAmelCase )
                    ] ,dim=0 ,).reshape(_UpperCAmelCase ,_UpperCAmelCase ) )
            _a : Tuple =permute(
                torch.cat(
                    [
                        loaded[i][F"layers.{layer_i}.attention.wk.weight"].view(
                            _UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
                        for i in range(_UpperCAmelCase )
                    ] ,dim=0 ,).reshape(_UpperCAmelCase ,_UpperCAmelCase ) ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,)
            _a : Any =torch.cat(
                [
                    loaded[i][F"layers.{layer_i}.attention.wv.weight"].view(
                        _UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
                    for i in range(_UpperCAmelCase )
                ] ,dim=0 ,).reshape(_UpperCAmelCase ,_UpperCAmelCase )
            # Row-parallel weights concatenate on dim=1, column-parallel on dim=0.
            _a : List[str] =torch.cat(
                [loaded[i][F"layers.{layer_i}.attention.wo.weight"] for i in range(_UpperCAmelCase )] ,dim=1 )
            _a : Union[str, Any] =torch.cat(
                [loaded[i][F"layers.{layer_i}.feed_forward.w1.weight"] for i in range(_UpperCAmelCase )] ,dim=0 )
            _a : Tuple =torch.cat(
                [loaded[i][F"layers.{layer_i}.feed_forward.w2.weight"] for i in range(_UpperCAmelCase )] ,dim=1 )
            _a : Union[str, Any] =torch.cat(
                [loaded[i][F"layers.{layer_i}.feed_forward.w3.weight"] for i in range(_UpperCAmelCase )] ,dim=0 )
        # Every layer stores the same precomputed rotary inverse frequencies.
        _a : str =inv_freq
        for k, v in state_dict.items():
            # Record which shard file each tensor lives in, and count its elements.
            _a : Any =filename
            param_count += v.numel()
        torch.save(_UpperCAmelCase ,os.path.join(_UpperCAmelCase ,_UpperCAmelCase ) )
    # Final shard: token embeddings, final norm and LM head.
    _a : Union[str, Any] =F"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        _a : List[str] ={
            """model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
            """model.norm.weight""": loaded["""norm.weight"""],
            """lm_head.weight""": loaded["""output.weight"""],
        }
    else:
        _a : int ={
            """model.norm.weight""": loaded[0]["""norm.weight"""],
            """model.embed_tokens.weight""": torch.cat(
                [loaded[i]["""tok_embeddings.weight"""] for i in range(_UpperCAmelCase )] ,dim=1 ),
            """lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(_UpperCAmelCase )] ,dim=0 ),
        }
    for k, v in state_dict.items():
        _a : Dict =filename
        param_count += v.numel()
    torch.save(_UpperCAmelCase ,os.path.join(_UpperCAmelCase ,_UpperCAmelCase ) )
    # Write configs
    # total_size presumably assumes 2 bytes per parameter (fp16/bf16) — TODO confirm.
    _a : Tuple ={"""total_size""": param_count * 2}
    write_json(_UpperCAmelCase ,os.path.join(_UpperCAmelCase ,"""pytorch_model.bin.index.json""" ) )
    _a : Optional[Any] =params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
    _a : int =params["""multiple_of"""] if """multiple_of""" in params else 256
    _a : List[Any] =LlamaConfig(
        hidden_size=_UpperCAmelCase ,intermediate_size=compute_intermediate_size(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) ,num_attention_heads=params["""n_heads"""] ,num_hidden_layers=params["""n_layers"""] ,rms_norm_eps=params["""norm_eps"""] ,num_key_value_heads=_UpperCAmelCase ,)
    config.save_pretrained(_UpperCAmelCase )
    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()
    print("""Loading the checkpoint in a Llama model.""" )
    _a : Any =LlamaForCausalLM.from_pretrained(_UpperCAmelCase ,torch_dtype=torch.floataa ,low_cpu_mem_usage=_UpperCAmelCase )
    # Avoid saving this as part of the config.
    del model.config._name_or_path
    print("""Saving in the Transformers format.""" )
    model.save_pretrained(_UpperCAmelCase ,safe_serialization=_UpperCAmelCase )
    # The scratch "tmp" directory is no longer needed once the final save succeeds.
    shutil.rmtree(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE_(tokenizer_path, input_tokenizer_path) -> None:
    """Load the SentencePiece tokenizer model and save it in Transformers format.

    The original signature repeated ``_UpperCAmelCase`` (a SyntaxError) and every
    local was mangled to ``_a`` while the body read ``tokenizer_class`` /
    ``tokenizer_path``; distinct names are restored.

    Args:
        tokenizer_path: output directory for the converted tokenizer.
        input_tokenizer_path: path to the original ``tokenizer.model`` spm file.
    """
    # Initialize the tokenizer based on the `spm` model; prefer the fast tokenizer
    # when the optional import at module top succeeded.
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(F"Saving a {tokenizer_class.__name__} to {tokenizer_path}." )
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def SCREAMING_SNAKE_CASE_() -> None:
    """CLI entry point: parse arguments, convert the model and/or the tokenizer.

    Fixes: the original read the module-level-undefined name ``_UpperCAmelCase``
    both as the ``type=`` of ``--safe_serialization`` and as the tokenizer path
    passed to ``write_tokenizer``; a proper local is restored.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--input_dir""" ,help="""Location of LLaMA weights, which contains tokenizer.model and model folders""" ,)
    parser.add_argument(
        """--model_size""" ,choices=["""7B""", """7Bf""", """13B""", """13Bf""", """30B""", """65B""", """70B""", """70Bf""", """tokenizer_only"""] ,)
    parser.add_argument(
        """--output_dir""" ,help="""Location to write HF model and tokenizer""" ,)
    # NOTE: argparse's bool() treats any non-empty string as True; mirrors upstream usage.
    parser.add_argument("""--safe_serialization""" ,type=bool ,help="""Whether or not to save using `safetensors`.""" )
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir ,input_base_path=os.path.join(args.input_dir ,args.model_size ) ,model_size=args.model_size ,safe_serialization=args.safe_serialization ,)
    spm_path = os.path.join(args.input_dir ,"""tokenizer.model""" )
    write_tokenizer(args.output_dir ,spm_path )
if __name__ == "__main__":
    # NOTE(review): `main` is undefined as written — the entry point above was
    # mangled to `SCREAMING_SNAKE_CASE_`.  Confirm the intended entry-point name.
    main()
| 276 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import plumbing for the CLIP model family: symbols are only registered for
# backends (tokenizers / vision / torch / tf / flax) that are actually importable.
# NOTE(review): names below appear machine-mangled — the repeated `a : ... =`
# assignments were presumably `_import_structure[...] = ...` updates upstream; as
# written each assignment merely rebinds `a`, and `_LazyModule` at the bottom
# references an undefined `_import_structure`.  Confirm against upstream before use.
a : Optional[Any] = {
    '''configuration_clip''': [
        '''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''CLIPConfig''',
        '''CLIPOnnxConfig''',
        '''CLIPTextConfig''',
        '''CLIPVisionConfig''',
    ],
    '''processing_clip''': ['''CLIPProcessor'''],
    '''tokenization_clip''': ['''CLIPTokenizer'''],
}
# Fast tokenizer is only exposed when the `tokenizers` backend is available.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a : Any = ['''CLIPTokenizerFast''']
# Image-processing classes require the vision backend (PIL).
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a : Any = ['''CLIPFeatureExtractor''']
    a : List[Any] = ['''CLIPImageProcessor''']
# PyTorch model classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a : Any = [
        '''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''CLIPModel''',
        '''CLIPPreTrainedModel''',
        '''CLIPTextModel''',
        '''CLIPTextModelWithProjection''',
        '''CLIPVisionModel''',
        '''CLIPVisionModelWithProjection''',
    ]
# TensorFlow model classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a : Any = [
        '''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFCLIPModel''',
        '''TFCLIPPreTrainedModel''',
        '''TFCLIPTextModel''',
        '''TFCLIPVisionModel''',
    ]
# Flax model classes.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a : Tuple = [
        '''FlaxCLIPModel''',
        '''FlaxCLIPPreTrainedModel''',
        '''FlaxCLIPTextModel''',
        '''FlaxCLIPTextPreTrainedModel''',
        '''FlaxCLIPVisionModel''',
        '''FlaxCLIPVisionPreTrainedModel''',
    ]
# Under static type checking, import everything eagerly so analyzers see real
# symbols; at runtime the module is replaced by a _LazyModule that defers imports.
if TYPE_CHECKING:
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )
else:
    import sys
    a : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 105 |
'''simple docstring'''
import contextlib
import os
import sqlite3

import sqlitea

import pytest

from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def SCREAMING_SNAKE_CASE_(dataset, expected_features) -> None:
    """Shared assertions for datasets read back from the SQL fixture table.

    Fixes: the original repeated ``_UpperCAmelCase`` for both parameters (a
    SyntaxError) and therefore asserted ``isinstance(x, x)``; the type check is
    restored to the ``Dataset`` class imported at module top.
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def SCREAMING_SNAKE_CASE_(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Reading from SQL honours keep_in_memory (and allocates Arrow memory accordingly).

    NOTE(review): fixture names were mangled to duplicate ``_UpperCAmelCase``
    parameters (a SyntaxError); they are reconstructed from the body's references
    and upstream conventions — confirm the fourth fixture name.
    """
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    # keep_in_memory=True must allocate Arrow memory; False must not.
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def SCREAMING_SNAKE_CASE_(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Reading from SQL honours an explicit ``features`` schema (or infers one).

    NOTE(review): parameters reconstructed from the mangled duplicate
    ``_UpperCAmelCase`` signature (a SyntaxError as written); the ``Value(...)``
    argument is restored to the iterated ``dtype``.
    """
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def SCREAMING_SNAKE_CASE_(sqlite_path):
    """Yield every row of the ``dataset`` table in the SQLite database at ``sqlite_path``.

    Fix: the original called ``sqlitea.connect`` — ``sqlitea`` is a mangled,
    nonexistent module name for the stdlib ``sqlite3``.
    """
    # closing() guarantees the connection is released even on early generator exit.
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        yield from cur
@require_sqlalchemy
def SCREAMING_SNAKE_CASE_(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Round-trip: write a dataset to SQL (single process) and compare row-by-row.

    NOTE(review): fixture names reconstructed from the mangled duplicate
    ``_UpperCAmelCase`` signature (a SyntaxError as written).
    """
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for rowa, rowa_expected in zip(original_sql, expected_sql):
        assert rowa == rowa_expected
@require_sqlalchemy
def SCREAMING_SNAKE_CASE_(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Round-trip: write a dataset to SQL with num_proc=2 and compare row-by-row.

    NOTE(review): fixture names reconstructed from the mangled duplicate
    ``_UpperCAmelCase`` signature (a SyntaxError as written).
    """
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for rowa, rowa_expected in zip(original_sql, expected_sql):
        assert rowa == rowa_expected
@require_sqlalchemy
def SCREAMING_SNAKE_CASE_(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Writing with num_proc=0 must raise.

    NOTE(review): parameters and the expected exception type were mangled
    (``pytest.raises(_UpperCAmelCase)``); ``ValueError`` is restored per the
    upstream test — confirm.
    """
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
| 276 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class SCREAMING_SNAKE_CASE :
    """Helper that builds a tiny MBart config and input batch for the common TF tests.

    NOTE(review): names look machine-mangled — the three `lowercase__` class
    attributes shadow one another (presumably `config_cls`, `config_updates`,
    `hidden_act` upstream), `__init__` repeats `lowercase_` for every parameter
    (a SyntaxError as written), and the method bodies read names such as
    `parent`, `input_ids`, `model`, `outputs` that the mangling destroyed.
    Confirm against the upstream TFMBartModelTester before relying on this class.
    """

    lowercase__ = MBartConfig
    lowercase__ = {}
    lowercase__ = "gelu"

    def __init__( self : Union[str, Any] ,lowercase_ : int ,lowercase_ : Optional[Any]=1_3 ,lowercase_ : Dict=7 ,lowercase_ : Union[str, Any]=True ,lowercase_ : str=False ,lowercase_ : Optional[Any]=9_9 ,lowercase_ : Dict=3_2 ,lowercase_ : Tuple=2 ,lowercase_ : Union[str, Any]=4 ,lowercase_ : Optional[Any]=3_7 ,lowercase_ : Union[str, Any]=0.1 ,lowercase_ : List[str]=0.1 ,lowercase_ : Any=2_0 ,lowercase_ : Optional[Any]=2 ,lowercase_ : Any=1 ,lowercase_ : List[Any]=0 ,):
        # Defaults suggest (parent, batch_size=13, seq_length=7, is_training=True,
        # use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2,
        # num_attention_heads=4, intermediate_size=37, dropouts=0.1,
        # max_position_embeddings=20, eos=2, pad=1, bos=0) — TODO confirm upstream.
        lowerCAmelCase__ : Union[str, Any] = parent
        lowerCAmelCase__ : Dict = batch_size
        lowerCAmelCase__ : Optional[int] = seq_length
        lowerCAmelCase__ : int = is_training
        lowerCAmelCase__ : List[str] = use_labels
        lowerCAmelCase__ : Tuple = vocab_size
        lowerCAmelCase__ : Union[str, Any] = hidden_size
        lowerCAmelCase__ : Tuple = num_hidden_layers
        lowerCAmelCase__ : Union[str, Any] = num_attention_heads
        lowerCAmelCase__ : Optional[Any] = intermediate_size
        lowerCAmelCase__ : List[Any] = hidden_dropout_prob
        lowerCAmelCase__ : Optional[Any] = attention_probs_dropout_prob
        lowerCAmelCase__ : str = max_position_embeddings
        lowerCAmelCase__ : Any = eos_token_id
        lowerCAmelCase__ : Optional[Any] = pad_token_id
        lowerCAmelCase__ : Optional[Any] = bos_token_id

    def __lowerCAmelCase ( self : Union[str, Any] ):
        # Builds random input_ids terminated by EOS, a matching decoder batch,
        # and a tiny MBartConfig; returns (config, inputs_dict).
        lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
        lowerCAmelCase__ : Any = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
        lowerCAmelCase__ : Tuple = tf.concat([input_ids, eos_tensor] ,axis=1 )
        lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        lowerCAmelCase__ : List[Any] = self.config_cls(
            vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
        lowerCAmelCase__ : Dict = prepare_mbart_inputs_dict(lowercase_ ,lowercase_ ,lowercase_ )
        return config, inputs_dict

    def __lowerCAmelCase ( self : List[Any] ,lowercase_ : Any ,lowercase_ : Dict ):
        # Decoder-only forward pass with cache enabled; appears truncated here —
        # upstream continues by comparing cached vs. uncached generation.
        lowerCAmelCase__ : List[Any] = TFMBartModel(config=lowercase_ ).get_decoder()
        lowerCAmelCase__ : Any = inputs_dict['''input_ids''']
        lowerCAmelCase__ : int = input_ids[:1, :]
        lowerCAmelCase__ : Dict = inputs_dict['''attention_mask'''][:1, :]
        lowerCAmelCase__ : str = inputs_dict['''head_mask''']
        lowerCAmelCase__ : Tuple = 1
        # first forward pass
        lowerCAmelCase__ : str = model(lowercase_ ,attention_mask=lowercase_ ,head_mask=lowercase_ ,use_cache=lowercase_ )
        lowerCAmelCase__ ,lowerCAmelCase__ : int = outputs.to_tuple()
        lowerCAmelCase__ : List[str] = past_key_values[1]
def __SCREAMING_SNAKE_CASE(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the full input dict for a TFMBart forward pass, deriving any missing masks.

    Fixes: the original repeated ``A_`` for every parameter (a SyntaxError) and
    assigned the derived masks to a single mangled local, so the returned names
    were unbound; ``tf.inta`` is restored to ``tf.int8`` — confirm the intended
    dtype against upstream.
    """
    # Attention masks: 1 where the token is not padding.
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # The first decoder position (decoder_start_token) is always attended to.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    # Head masks default to all-ones (no heads pruned).
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class SCREAMING_SNAKE_CASE ( a_ , a_ , unittest.TestCase ):
    """Common-suite tests for TFMBart (base model + conditional generation).

    NOTE(review): the base classes were mangled to `a_` (presumably
    TFModelTesterMixin and PipelineTesterMixin), and the `lowercase__` class
    attributes shadow one another (upstream: all_model_classes,
    all_generative_model_classes, pipeline_model_mapping, is_encoder_decoder,
    test_pruning, test_onnx).  Several method bodies also read mangled names
    (`pipeline_test_casse_name`, `TFMBartModelTester`, `config_and_inputs`).
    Confirm against upstream before relying on this class.
    """

    lowercase__ = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    lowercase__ = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    lowercase__ = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    lowercase__ = True
    lowercase__ = False
    lowercase__ = False

    def __lowerCAmelCase ( self : Any ,lowercase_ : int ,lowercase_ : Optional[Any] ,lowercase_ : Any ,lowercase_ : str ,lowercase_ : str ):
        # Skip every pipeline test except feature extraction (known TF layer-call issue).
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False

    def __lowerCAmelCase ( self : List[Any] ):
        # Test setUp: build the model tester and the config tester.
        lowerCAmelCase__ : int = TFMBartModelTester(self )
        lowerCAmelCase__ : str = ConfigTester(self ,config_class=lowercase_ )

    def __lowerCAmelCase ( self : Optional[int] ):
        # Runs the shared configuration sanity checks.
        self.config_tester.run_common_tests()

    def __lowerCAmelCase ( self : Tuple ):
        # Exercises the decoder past-key-values path on a tiny config.
        lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ )
@require_sentencepiece
@require_tokenizers
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow integration test: en→ro translation with facebook/mbart-large-en-ro.

    NOTE(review): the `lowercase__` attributes shadow one another (upstream:
    src_text, expected_text, model_name), and several method bodies read mangled
    names (`model`, `generated_words`, the `translate_src_text` call).  Confirm
    against upstream before relying on this class.
    """

    lowercase__ = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    lowercase__ = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    lowercase__ = "facebook/mbart-large-en-ro"

    @cached_property
    def __lowerCAmelCase ( self : Tuple ):
        # Tokenizer is downloaded once and cached on the instance.
        return AutoTokenizer.from_pretrained(self.model_name )

    @cached_property
    def __lowerCAmelCase ( self : List[Any] ):
        # Model is downloaded once and cached on the instance.
        lowerCAmelCase__ : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    def __lowerCAmelCase ( self : List[Any] ,**lowercase_ : int ):
        # Translate the source batch and compare against the expected target text.
        lowerCAmelCase__ : int = self.translate_src_text(**lowercase_ )
        self.assertListEqual(self.expected_text ,lowercase_ )

    def __lowerCAmelCase ( self : Any ,**lowercase_ : Any ):
        # Tokenize, generate with beam search (num_beams=2), and decode.
        lowerCAmelCase__ : List[str] = self.tokenizer(self.src_text ,**lowercase_ ,return_tensors='''tf''' )
        lowerCAmelCase__ : Union[str, Any] = self.model.generate(
            model_inputs.input_ids ,attention_mask=model_inputs.attention_mask ,num_beams=2 )
        lowerCAmelCase__ : Dict = self.tokenizer.batch_decode(lowercase_ ,skip_special_tokens=lowercase_ )
        return generated_words

    @slow
    def __lowerCAmelCase ( self : Dict ):
        self._assert_generated_batch_equal_expected()
| 106 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): both module-level names were mangled to `A__` (the second
# assignment overwrites the first) — upstream these are presumably `logger` and
# the pretrained-config archive map.  Confirm before use.
A__: List[str] = logging.get_logger(__name__)
# Pretrained checkpoint name -> hosted config URL.
A__: Union[str, Any] = {
    '''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class A__ ( UpperCAmelCase__ ):
    """Configuration for a data2vec-text model (BERT/RoBERTa-style hyper-parameters).

    Fixes: the original ``__init__`` repeated ``SCREAMING_SNAKE_CASE`` for every
    parameter (a SyntaxError) and assigned each value to a throwaway ``_a``
    local instead of ``self``; standard parameter names (matching upstream
    Data2VecTextConfig) and real attribute assignments are restored.
    """

    # Model-type identifier (attribute name mangled; upstream: `model_type`).
    __UpperCamelCase : int = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        """Store the model hyper-parameters; token ids are forwarded to the base config."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class A__ ( UpperCAmelCase__ ):
    """ONNX export configuration: declares the dynamic input axes of the model.

    Fix: the original assigned the axis mapping to a throwaway ``_a`` local and
    then returned the unbound name ``dynamic_axis``; the local is restored.
    """

    @property
    def __UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
        """Return the ONNX input names with their dynamic-axis labels."""
        # Multiple-choice inputs carry an extra `choice` axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 276 | 0 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
# NOTE(review): every assignment target below was mangled to `__lowerCAmelCase`,
# while later lines read `data`, `X`, `y`, `classes`, `X_train`, `y_train` —
# upstream these are presumably distinct names.  Confirm before use.
__lowerCAmelCase : Any = datasets.load_iris()
# Feature matrix and integer class labels of the iris dataset.
__lowerCAmelCase : List[Any] = np.array(data['data'])
__lowerCAmelCase : List[str] = np.array(data['target'])
# Human-readable class names indexed by label.
__lowerCAmelCase : Dict = data['target_names']
# Random train/test split (sklearn default 75/25).
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : List[str] = train_test_split(X, y)
def __magic_name__ ( A : Optional[Any], A : Optional[Any] ):
'''simple docstring'''
return np.linalg.norm(np.array(A ) - np.array(A ) )
def __magic_name__ ( A : Tuple, A : List[Any], A : int, A : str, A : int=5 ):
'''simple docstring'''
a = zip(A, A )
# List of distances of all points from the point to be classified
a = []
for data_point in data:
a = euclidean_distance(data_point[0], A )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
a = [i[1] for i in sorted(A )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
a = Counter(A ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
    # Demo: classify one unseen iris sample with the training split above.
    # NOTE(review): `classifier` is undefined as written — the function above was
    # mangled to `__magic_name__`.  Confirm the intended name.
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 107 |
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
A__: Union[str, Any] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_(box, width, height):
    """Normalize a (left, top, right, bottom) pixel box to the 0-1000 range used by LayoutLM.

    Fix: the original repeated ``_UpperCAmelCase`` for all three parameters
    (a SyntaxError); standard names are restored.
    """
    # x-coordinates scale by width, y-coordinates by height.
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def SCREAMING_SNAKE_CASE_(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Apply Tesseract OCR to ``image`` and return (words, normalized_boxes).

    Fixes: the original repeated ``_UpperCAmelCase`` in the signature
    (a SyntaxError) and collapsed every local to ``_a`` while later lines read
    the destroyed names; locals are restored per the body's own references.
    The O(n^2) membership tests against the irrelevant-index list now use a set.

    Args:
        image: input image as a numpy array (converted to PIL internally).
        lang: Tesseract language code, or None for the default.
        tesseract_config: extra Tesseract CLI flags; defaults to "".

    Returns:
        A pair (words, boxes) where boxes are 0-1000-normalized
        (left, top, right, bottom) lists aligned with words.
    """
    tesseract_config = tesseract_config if tesseract_config is not None else ""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates (set gives O(1) membership)
    irrelevant_indices = {idx for idx, word in enumerate(words) if not word.strip()}
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class A__ ( UpperCAmelCase__ ):
    """Image processor for LayoutLMv2-style document models.

    Resizes document images to a fixed ``{"height", "width"}`` size, optionally runs
    Tesseract OCR to extract words plus normalized bounding boxes, flips the color
    channels from RGB to BGR (Detectron2 convention) and packs the result into a
    ``BatchFeature``.

    NOTE(review): the original block had every parameter named ``SCREAMING_SNAKE_CASE``
    (a SyntaxError) and assigned each resolved option to a throwaway ``_a`` local while
    reading the original names afterwards; both defects are fixed here.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        """
        Args:
            do_resize: whether to resize images to ``size``.
            size: target size; defaults to ``{"height": 224, "width": 224}``.
            resample: PIL resampling filter used when resizing.
            apply_ocr: whether to run Tesseract OCR to get words and boxes.
            ocr_lang: language passed to Tesseract (``None`` means English).
            tesseract_config: extra flags forwarded to Tesseract.
        """
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` to ``(size["height"], size["width"])``.

        Raises:
            ValueError: if ``size`` lacks the ``height``/``width`` keys.
        """
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        # `resize` here is the module-level functional helper, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess one or more document images (OCR, resize, channel flip)."""
        # Per-call arguments override the defaults stored on the instance.
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 276 | 0 |
"""simple docstring"""
def circle_sort(collection: list) -> list:
    """Sort ``collection`` in place with the circle sort algorithm and return it.

    The original (obfuscated) version unpacked both sides of every swap into the
    same throwaway name, so the list was never actually modified; the swaps are
    restored here.

    Args:
        collection: mutable list of mutually comparable items.

    Returns:
        The same list, sorted in ascending order.
    """
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        """One circle pass over ``collection[low:high + 1]``; True if any swap happened."""
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        # Compare and swap mirrored pairs moving inwards from both ends.
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        # Odd-length segment: the middle element is compared with its right neighbor.
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    # Repeat full passes until a pass makes no swaps.
    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection


# Backward-compatible alias for the previous (obfuscated) public name.
a__ = circle_sort
if __name__ == "__main__":
    # CLI entry point: read comma-separated integers from stdin and print them sorted.
    # (The obfuscated version assigned both locals to the same name and then read
    # `user_input`, which no longer existed.)
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
| 108 |
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    """Fetch a single Hacker News story (as a dict) from the Firebase API."""
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Return the top ``max_stories`` posts currently on Hacker News."""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    """Render the top stories as a markdown bullet list of ``[title](url)`` links."""
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
    # CLI entry point: print the top Hacker News stories as a markdown bullet list.
    print(hackernews_top_stories_as_markdown())
| 276 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Unit tests for ``transformers.generation.DisjunctiveConstraint``.

    The obfuscated version referenced an undefined ``_SCREAMING_SNAKE_CASE`` and
    unpacked ``dc.update(...)`` into three variables sharing one name; the intended
    locals and the standard ``test_*`` method names are restored here so unittest
    actually discovers and runs the tests.
    """

    def test_input_types(self):
        """The constraint is initialized only from nested lists of python ints."""
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        # Tensors (or lists of tensors) are rejected.
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        """A branch that is a complete prefix of another branch is ambiguous and rejected."""
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        """Stepping 1 -> 2 -> 3 completes the [1, 2, 3] branch."""
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        """Progression across branches of different lengths, plus reset behaviour."""
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 109 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(UpperCAmelCase__):
    """Output of the VQ model's ``encode`` method.

    Attributes:
        latents: encoded (pre-quantization) latent feature map.
    """

    # The field must be named `latents`: `encode` constructs VQEncoderOutput(latents=...).
    latents: torch.FloatTensor
class A__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
    """VQ-VAE model (encoder + vector quantizer + decoder) for decoding latents.

    NOTE(review): the original block had every ``__init__`` parameter named
    ``SCREAMING_SNAKE_CASE`` (a SyntaxError) and used ``nn.Convad`` — a mangled
    ``nn.Conv2d``. Parameter and method names are restored so that ``forward``'s
    calls to ``self.encode`` / ``self.decode`` resolve.
    """

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18_215,
        norm_type: str = "group",
    ) -> Optional[int]:
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        """Encode ``x`` into (unquantized) latents."""
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        """Quantize latents (unless ``force_not_quantize``) and decode them to a sample."""
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        # "spatial" norm decoders additionally condition on the quantized latents.
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        """Full encode -> quantize -> decode round trip."""
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
| 276 | 0 |
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """Run ``check_program`` in a child process and report whether it passed.

    Args:
        check_program: python source (candidate + test) to execute.
        timeout: per-program time limit in seconds.
        task_id: identifier echoed back in the result dict.
        completion_id: identifier echoed back in the result dict.

    Returns:
        dict with ``task_id``, ``passed``, ``result`` and ``completion_id``.
    """
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    # Give the child slightly longer than its own internal time limit.
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program, result, timeout):
    """Execute ``check_program`` inside a temp dir with destructive APIs disabled.

    Appends "passed", "timed out" or "failed: ..." to the shared ``result`` list.
    WARNING: this is a best-effort guard, not a security sandbox — the program is
    still ``exec``-ed in this process.
    """
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    """Context manager raising ``TimeoutException`` after ``seconds`` (SIGALRM-based, Unix only)."""

    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        # Always cancel the timer so it cannot fire after the block exits.
        signal.setitimer(signal.ITIMER_REAL, 0)
@contextlib.contextmanager
def swallow_io():
    """Silence stdout/stderr and make stdin unreadable for the wrapped block."""
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield
@contextlib.contextmanager
def create_tempdir():
    """Create a temporary directory, cd into it for the block, then clean it up."""
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    """Raised by ``time_limit`` when the wrapped block exceeds its time budget."""

    pass
class WriteOnlyStringIO(io.StringIO):
    """StringIO that raises ``OSError`` whenever it is read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns False so callers know this IO object cannot be read."""
        return False
class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    """Context manager redirecting ``sys.stdin`` (counterpart of ``redirect_stdout``)."""

    # contextlib._RedirectStream swaps the sys attribute named here.
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
    """Temporarily change the working directory to ``root`` (no-op for ".")."""
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        # Restore the original working directory even on error.
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """
    Disable destructive functions so an exec'ed candidate program cannot easily
    interfere with the test harness (fork bombs, killing processes, deleting files).

    WARNING: this is NOT a security sandbox. Untrusted (e.g. model-generated) code
    must still be run inside a proper sandbox.

    NOTE(review): in the obfuscated version every assignment target below had been
    replaced by a throwaway local, turning the whole guard into a no-op; the
    attribute targets are restored here following the upstream HumanEval harness.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    # Block modules commonly used to escape or inspect the harness.
    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
| 338 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class A__ :
    """Builds a tiny EsmConfig plus random inputs and runs shape checks for the ESM tests.

    NOTE(review): an obfuscation pass renamed every ``__init__`` parameter to the same
    identifier ``SCREAMING_SNAKE_CASE`` — duplicate parameter names are a SyntaxError —
    and the body reads names (``parent``, ``batch_size``, ...) that no longer exist,
    while assigning each to a throwaway ``_a``. The distinct parameter names and the
    ``self.<attr>`` assignment targets must be restored for this class to run; the code
    is left byte-identical here pending that fix.
    """
    def __init__( self :Tuple , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Optional[int]=1_3 , SCREAMING_SNAKE_CASE :Optional[int]=7 , SCREAMING_SNAKE_CASE :Tuple=False , SCREAMING_SNAKE_CASE :Dict=True , SCREAMING_SNAKE_CASE :Optional[int]=False , SCREAMING_SNAKE_CASE :Optional[Any]=True , SCREAMING_SNAKE_CASE :List[str]=3_3 , SCREAMING_SNAKE_CASE :Tuple=3_2 , SCREAMING_SNAKE_CASE :Tuple=5 , SCREAMING_SNAKE_CASE :int=4 , SCREAMING_SNAKE_CASE :Union[str, Any]=3_7 , SCREAMING_SNAKE_CASE :List[str]="gelu" , SCREAMING_SNAKE_CASE :Optional[Any]=0.1 , SCREAMING_SNAKE_CASE :Tuple=0.1 , SCREAMING_SNAKE_CASE :str=5_1_2 , SCREAMING_SNAKE_CASE :Dict=1_6 , SCREAMING_SNAKE_CASE :Dict=2 , SCREAMING_SNAKE_CASE :Union[str, Any]=0.02 , SCREAMING_SNAKE_CASE :str=3 , SCREAMING_SNAKE_CASE :List[str]=4 , SCREAMING_SNAKE_CASE :List[str]=None , ) -> Union[str, Any]:
        """Store tester hyperparameters (intended: batch_size=13, seq_length=7, vocab_size=33, ...)."""
        _a : Union[str, Any] =parent
        _a : List[Any] =batch_size
        _a : Optional[int] =seq_length
        _a : Union[str, Any] =is_training
        _a : List[Any] =use_input_mask
        _a : Optional[int] =use_token_type_ids
        _a : int =use_labels
        _a : List[str] =vocab_size
        _a : List[Any] =hidden_size
        _a : int =num_hidden_layers
        _a : Tuple =num_attention_heads
        _a : Any =intermediate_size
        _a : str =hidden_act
        _a : Union[str, Any] =hidden_dropout_prob
        _a : Union[str, Any] =attention_probs_dropout_prob
        _a : str =max_position_embeddings
        _a : Dict =type_vocab_size
        _a : Tuple =type_sequence_label_size
        _a : Dict =initializer_range
        _a : List[str] =num_labels
        _a : Tuple =num_choices
        _a : int =scope
    def __UpperCAmelCase ( self :Union[str, Any] ) -> str:
        """Create a config plus random input ids / mask / label tensors."""
        _a : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _a : List[Any] =None
        if self.use_input_mask:
            _a : Any =random_attention_mask([self.batch_size, self.seq_length] )
        _a : Optional[int] =None
        _a : str =None
        _a : Dict =None
        if self.use_labels:
            _a : Dict =ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _a : str =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            _a : List[str] =ids_tensor([self.batch_size] , self.num_choices )
        _a : List[Any] =self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def __UpperCAmelCase ( self :str ) -> Optional[int]:
        """Build a small EsmConfig from the stored hyperparameters."""
        return EsmConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :int ) -> Tuple:
        """Instantiate EsmModel and check the output shapes."""
        _a : Any =EsmModel(config=SCREAMING_SNAKE_CASE )
        model.to(SCREAMING_SNAKE_CASE )
        model.eval()
        _a : Optional[Any] =model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )
        _a : Union[str, Any] =model(SCREAMING_SNAKE_CASE )
        _a : str =model(SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Dict:
        """Instantiate EsmForMaskedLM and check the logits shape."""
        _a : str =EsmForMaskedLM(config=SCREAMING_SNAKE_CASE )
        model.to(SCREAMING_SNAKE_CASE )
        model.eval()
        _a : Union[str, Any] =model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> Optional[Any]:
        """Instantiate EsmForTokenClassification and check the logits shape."""
        _a : int =self.num_labels
        _a : Tuple =EsmForTokenClassification(config=SCREAMING_SNAKE_CASE )
        model.to(SCREAMING_SNAKE_CASE )
        model.eval()
        _a : Tuple =model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def __UpperCAmelCase ( self :Dict ) -> List[str]:
        """Repackage prepare_config_and_inputs() output as (config, inputs_dict)."""
        _a : Optional[Any] =self.prepare_config_and_inputs()
        # NOTE(review): annotated assignment to a parenthesized tuple target is a
        # SyntaxError, and `config_and_inputs` is never defined — broken by obfuscation.
        (
            (
                _a
            ) , (
                _a
            ) , (
                _a
            ) , (
                _a
            ) , (
                _a
            ) , (
                _a
            ) ,
        ) : Any =config_and_inputs
        _a : List[Any] ={"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class A__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
    """Standard model/pipeline test-suite wiring for ESM.

    NOTE(review): every class attribute below was renamed to the same mangled name
    ``__UpperCamelCase``, so only the final assignment survives — the mixin flags
    (all_model_classes, pipeline_model_mapping, test_head_masking, ...) are lost.
    ``EsmModelTester`` in setUp is also undefined (the tester class above was renamed
    ``A__``). Restore the distinct names for the mixins to work.
    """
    __UpperCamelCase : Any = False
    __UpperCamelCase : Any = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    __UpperCamelCase : str = ()
    __UpperCamelCase : List[str] = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    __UpperCamelCase : Union[str, Any] = True
    def __UpperCAmelCase ( self :Optional[int] ) -> int:
        """Set up the model tester and the config tester."""
        _a : Dict =EsmModelTester(self )
        _a : Optional[Any] =ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=3_7 )
    def __UpperCAmelCase ( self :Tuple ) -> List[Any]:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    def __UpperCAmelCase ( self :Optional[int] ) -> str:
        """Shape-check the base model."""
        _a : List[Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
    def __UpperCAmelCase ( self :List[Any] ) -> Dict:
        """Shape-check the base model under each position-embedding type."""
        _a : List[str] =self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            _a : Dict =type
            self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
    def __UpperCAmelCase ( self :Dict ) -> List[str]:
        """Shape-check the masked-LM head."""
        _a : Tuple =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE )
    def __UpperCAmelCase ( self :List[Any] ) -> List[str]:
        """Shape-check the token-classification head."""
        _a : str =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE )
    @slow
    def __UpperCAmelCase ( self :str ) -> Dict:
        """Load the first pretrained checkpoint and verify it instantiates."""
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _a : Union[str, Any] =EsmModel.from_pretrained(SCREAMING_SNAKE_CASE )
            self.assertIsNotNone(SCREAMING_SNAKE_CASE )
    def __UpperCAmelCase ( self :Tuple ) -> int:
        """Check create_position_ids_from_input_ids respects the padding index."""
        _a : Optional[Any] =self.model_tester.prepare_config_and_inputs()[0]
        _a : Dict =EsmEmbeddings(config=SCREAMING_SNAKE_CASE )
        _a : Tuple =torch.as_tensor([[1_2, 3_1, 1_3, model.padding_idx]] )
        _a : Optional[Any] =torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ] )
        _a : Any =create_position_ids_from_input_ids(SCREAMING_SNAKE_CASE , model.padding_idx )
        self.assertEqual(position_ids.shape , expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) )
    def __UpperCAmelCase ( self :Optional[Any] ) -> Tuple:
        """Check position ids derived from inputs_embeds start after the padding index."""
        _a : List[Any] =self.model_tester.prepare_config_and_inputs()[0]
        _a : Optional[int] =EsmEmbeddings(config=SCREAMING_SNAKE_CASE )
        _a : Tuple =torch.empty(2 , 4 , 3_0 )
        _a : str =[
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        _a : int =torch.as_tensor([expected_single_positions, expected_single_positions] )
        _a : Any =embeddings.create_position_ids_from_inputs_embeds(SCREAMING_SNAKE_CASE )
        self.assertEqual(position_ids.shape , expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) )
    @unittest.skip("""Esm does not support embedding resizing""" )
    def __UpperCAmelCase ( self :Tuple ) -> List[str]:
        """Skipped: embedding resizing is unsupported for ESM."""
        pass
    @unittest.skip("""Esm does not support embedding resizing""" )
    def __UpperCAmelCase ( self :str ) -> Any:
        """Skipped: embedding resizing is unsupported for ESM."""
        pass
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def __UpperCAmelCase ( self :Dict ) -> Any:
        """Skipped pending a smaller common-test model."""
        pass
@require_torch
class A__ ( UpperCAmelCase__ ):
    """Slow integration tests comparing ESM checkpoint outputs against reference values."""
    @slow
    def __UpperCAmelCase ( self :List[Any] ) -> str:
        """Masked-LM logits of facebook/esm2_t6_8M_UR50D must match the stored slice."""
        with torch.no_grad():
            _a : Optional[int] =EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
            model.eval()
            _a : Any =torch.tensor([[0, 1, 2, 3, 4, 5]] )
            _a : Tuple =model(SCREAMING_SNAKE_CASE )[0]
            _a : int =3_3
            _a : Tuple =torch.Size((1, 6, vocab_size) )
            self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
            _a : Union[str, Any] =torch.tensor(
                [[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] )
            # Loose tolerance: values differ slightly across hardware.
            self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
    @slow
    def __UpperCAmelCase ( self :Union[str, Any] ) -> Tuple:
        """Base-model hidden states of facebook/esm2_t6_8M_UR50D must match the stored slice."""
        with torch.no_grad():
            _a : Any =EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
            model.eval()
            _a : Any =torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
            _a : int =model(SCREAMING_SNAKE_CASE )[0]
            # compare the actual values for a slice.
            _a : str =torch.tensor(
                [[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 276 | 0 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'''huggingface/time-series-transformer-tourism-monthly''': (
'''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'''
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class a__ ( UpperCAmelCase__ ):
    """Configuration class for the Time Series Transformer model.

    NOTE(review): in the obfuscated version every ``__init__`` parameter was named
    ``_A`` (duplicate parameter names are a SyntaxError) while the body read the real
    names; the signature is restored to match the body.
    """

    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        """Store time-series and transformer hyperparameters; validate the
        categorical-feature metadata lists.

        Raises:
            ValueError: if ``cardinality`` or ``embedding_dimension`` is given but its
                length does not match ``num_static_categorical_features``.
        """
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            # Heuristic embedding size per categorical feature.
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Number of extra input features appended to the lagged values."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 92 |
'''simple docstring'''
from math import isqrt
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime (trial division up to its integer square root).

    The obfuscated original read an undefined name and treated 0 and 1 as prime;
    both are fixed here.
    """
    if number < 2:
        # 0 and 1 (and negatives) are not prime.
        return False
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))
def solution(max_prime: int = 10**6) -> int:
    """Count primes below ``max_prime`` that are a difference of consecutive cubes.

    Candidates are (n + 1)**3 - n**3 = 3*n**2 + 3*n + 1, generated incrementally:
    the sequence starts at 7 and each step adds 6 * cube_index (Project Euler 131).

    Args:
        max_prime: exclusive upper bound for the candidates.

    Returns:
        Number of prime candidates below ``max_prime``.
    """
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        # bool counts as 0/1 here.
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count
if __name__ == "__main__":
    # CLI entry point: print the count for the default limit of 10**6.
    print(F"{solution() = }")
| 276 | 0 |
import collections
import os
import re
from pathlib import Path
A_ : str = '''src/transformers'''
# Matches is_xxx_available()
A_ : List[str] = re.compile(r'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
A_ : Optional[int] = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
A_ : int = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
A_ : List[Any] = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
A_ : Dict = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
A_ : Optional[int] = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
A_ : Any = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
A_ : Optional[int] = re.compile(r'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
A_ : Any = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
A_ : Tuple = re.compile(r'^\s*try:')
# Catches a line with else:
A_ : str = re.compile(r'^\s*else:')
def find_backend(line):
    """Return the backend name(s) tested on `line` ("_and_"-joined, sorted), or None.

    BUG FIX: the original ignored its parameter and referenced an undefined name.
    Renamed to match the call sites in this module.
    """
    if _re_test_backend.search(line) is None:
        return None
    # _re_backend has a trailing empty group, so findall yields tuples.
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Parse one __init__.py that uses the lazy `_import_structure` pattern.

    Returns a pair of dicts (objects declared in `_import_structure`, objects
    imported under `TYPE_CHECKING`), each mapping a backend name ("none" for
    backend-free objects) to a list of object names, or None for a
    "traditional" init without `_import_structure`.

    BUG FIX: the original body referenced an undefined obfuscated name in place
    of the parameter and every loop variable; names restored throughout.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two dicts produced by `parse_init` and return error messages.

    Flags: differing backend sets, duplicated object names on either side, and
    objects present on one side but not the other.
    """

    def find_duplicates(seq):
        # Names that appear more than once in `seq`.
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Walk every `__init__.py` under PATH_TO_TRANSFORMERS and raise ValueError
    if any lazy init's `_import_structure` and TYPE_CHECKING halves disagree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    # Prefix the first error with the offending file for context.
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Return the list of transformers submodules: non-private package folders
    containing .py files, plus top-level .py modules (excluding __init__.py)."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            # Only keep top-level modules (no dotted path).
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
# Submodules that are deliberately not registered in the main init.
# BUG FIX: was bound to a throwaway name while check_submodules() reads
# IGNORE_SUBMODULES.
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    """Check every transformers submodule is a key of `_import_structure` in the
    main init (or explicitly ignored); raise ValueError otherwise."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            # Typo fix in the user-facing message ("registed" -> "registered").
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
    # Run both consistency checks; each raises ValueError on failure.
    check_all_inits()
    check_submodules()
| 192 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
# Emit a deprecation warning at import time pointing users at the new import
# location for these ControlNet classes; removal is scheduled for diffusers 0.22.0.
deprecate(
    '''stable diffusion controlnet''',
    '''0.22.0''',
    '''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
    standard_warn=False,
    stacklevel=3,
)
| 276 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the OPT model.
# BUG FIX: the structure dict and the backend object lists were each bound to a
# throwaway name, so the backend lists were never added to the structure and the
# `_import_structure` passed to _LazyModule below was undefined.
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 52 |
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        # BUG FIX: the range start was an undefined name; one shard per job.
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    """_distribute_shards must split num_shards into at most max_num_jobs contiguous ranges."""
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    """_split_gen_kwargs must shard list-valued kwargs across at most max_num_jobs jobs."""
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        # Lists of different lengths are inconsistent and must raise.
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    """All list-valued kwargs must agree on length; a mismatch raises RuntimeError."""
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 276 | 0 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
# Module logger; BUG FIX: was bound to a throwaway name while the pipeline
# class below references `logger`.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class lowercase( DiffusionPipeline ):
    """Speech-to-image pipeline: transcribes audio with Whisper, then renders the
    transcription with Stable Diffusion.

    NOTE(review): the original base-class name was undefined; `DiffusionPipeline`
    (imported above) is the base that provides `register_modules`/`progress_bar`/
    `numpy_to_pil` used below — confirm against upstream. The original __call__
    also repeated one parameter name (a SyntaxError) and collapsed all locals to
    a single name; names restored from their use sites.
    """

    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNetaDConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        """Register all sub-models on the pipeline; warn if the safety checker is disabled."""
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        """Enable sliced attention computation to reduce peak memory."""
        if slice_size == "auto":
            # Halve the attention head count for a reasonable default slice.
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """Disable sliced attention (compute attention in one step)."""
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Transcribe `audio` with Whisper and run Stable Diffusion on the text."""
        # Transcribe the audio into a text prompt.
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        # scale and decode the image latents with the VAE
        latents = 1 / 0.18_215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
| 64 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and the pretrained-config map.
# BUG FIX: both were bound to the same name as the config class defined below,
# which then shadowed them; restored to conventional distinct names.
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class A__ ( PretrainedConfig ):
    """Configuration for a RoCBert model (BERT variant with extra pronunciation-
    and shape-embedding inputs).

    NOTE(review): the original base-class name was undefined and every __init__
    parameter shared one name (a SyntaxError); signature restored from the
    assignment order and the `model_type` attribute restored so the
    PretrainedConfig machinery can identify the model family.
    """

    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        """Store all hyper-parameters; `pad_token_id` and any extra kwargs are
        forwarded to PretrainedConfig."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        # RoCBert-specific: optional pronunciation/shape embedding inputs.
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 276 | 0 |
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class _UpperCAmelCase :
def __init__( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : List[str]=99 , lowercase_ : List[str]=13 , lowercase_ : Optional[int]=7 , lowercase_ : Tuple=9 , lowercase_ : Any=True , lowercase_ : str=True , lowercase_ : Optional[Any]=False , lowercase_ : str=32 , lowercase_ : List[str]=5 , lowercase_ : Union[str, Any]=4 , lowercase_ : Tuple=37 , lowercase_ : Optional[int]=8 , lowercase_ : str=0.1 , lowercase_ : Tuple=0.0_02 , lowercase_ : Optional[Any]=1 , lowercase_ : Optional[int]=0 , lowercase_ : Tuple=0 , lowercase_ : str=None , lowercase_ : Optional[Any]=None , ):
snake_case_ : int = parent
snake_case_ : Tuple = batch_size
snake_case_ : List[Any] = encoder_seq_length
snake_case_ : List[str] = decoder_seq_length
# For common tests
snake_case_ : str = self.decoder_seq_length
snake_case_ : Optional[int] = is_training
snake_case_ : List[str] = use_attention_mask
snake_case_ : Union[str, Any] = use_labels
snake_case_ : List[str] = vocab_size
snake_case_ : Union[str, Any] = hidden_size
snake_case_ : int = num_hidden_layers
snake_case_ : Dict = num_attention_heads
snake_case_ : List[str] = d_ff
snake_case_ : Tuple = relative_attention_num_buckets
snake_case_ : List[Any] = dropout_rate
snake_case_ : List[str] = initializer_factor
snake_case_ : str = eos_token_id
snake_case_ : Union[str, Any] = pad_token_id
snake_case_ : Any = decoder_start_token_id
snake_case_ : Union[str, Any] = None
snake_case_ : int = decoder_layers
def _snake_case ( self : int ):
return TaConfig.from_pretrained('''google/umt5-base''' )
def _snake_case ( self : List[Any] , lowercase_ : Any , lowercase_ : Any , lowercase_ : Tuple , lowercase_ : Optional[int]=None , lowercase_ : Optional[Any]=None , lowercase_ : Any=None , lowercase_ : int=None , lowercase_ : List[str]=None , ):
if attention_mask is None:
snake_case_ : str = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
snake_case_ : str = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
snake_case_ : Optional[int] = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=lowercase_ )
if decoder_head_mask is None:
snake_case_ : int = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=lowercase_ )
if cross_attn_head_mask is None:
snake_case_ : Tuple = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=lowercase_ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def _snake_case ( self : Union[str, Any] ):
snake_case_ : List[str] = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
snake_case_ : Optional[int] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
snake_case_ : Optional[int] = input_ids.clamp(self.pad_token_id + 1 )
snake_case_ : List[str] = decoder_input_ids.clamp(self.pad_token_id + 1 )
snake_case_ : List[Any] = self.get_config()
snake_case_ : int = config.num_attention_heads
snake_case_ : Union[str, Any] = self.prepare_inputs_dict(lowercase_ , lowercase_ , lowercase_ )
return config, input_dict
def _snake_case ( self : int ):
snake_case_ : Dict = self.prepare_config_and_inputs()
return config, inputs_dict
def _snake_case ( self : List[Any] ):
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _snake_case ( self : Dict ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _snake_case ( self : Optional[int] , lowercase_ : Optional[int] , lowercase_ : Any , lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : Tuple , ):
snake_case_ : Optional[Any] = UMTaModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ : Optional[int] = model(
input_ids=lowercase_ , decoder_input_ids=lowercase_ , attention_mask=lowercase_ , decoder_attention_mask=lowercase_ , )
snake_case_ : Optional[int] = model(input_ids=lowercase_ , decoder_input_ids=lowercase_ )
snake_case_ : Any = result.last_hidden_state
snake_case_ : List[Any] = result.past_key_values
snake_case_ : List[str] = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(lowercase_ ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
# Verify the decoder's use_cache path: a step fed `past_key_values` must
# produce the same last-token hidden state as re-running the full sequence.
# NOTE(review): parameter names are mangled (six duplicate `lowercase_`, a
# SyntaxError) and results bound to `snake_case_` while later lines read
# `outputs`, `next_tokens`, `output_from_past`, etc. — restore before running.
def _snake_case ( self : Dict , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : List[str] , ):
snake_case_ : Any = UMTaModel(config=lowercase_ ).get_decoder().to(lowercase_ ).eval()
# first forward pass
snake_case_ : Tuple = model(lowercase_ , use_cache=lowercase_ )
snake_case_ : List[Any] = model(lowercase_ )
snake_case_ : str = model(lowercase_ , use_cache=lowercase_ )
# With use_cache=True the output tuple gains exactly one extra element
# (the past_key_values) compared to the cache-less call.
self.parent.assertTrue(len(lowercase_ ) == len(lowercase_ ) )
self.parent.assertTrue(len(lowercase_ ) == len(lowercase_ ) + 1 )
snake_case_ : List[Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case_ : str = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
snake_case_ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ : Optional[Any] = model(lowercase_ )["""last_hidden_state"""]
snake_case_ : Optional[int] = model(lowercase_ , past_key_values=lowercase_ )["""last_hidden_state"""]
# select random slice
snake_case_ : Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ : Optional[int] = output_from_no_past[:, -1, random_slice_idx].detach()
snake_case_ : str = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-3 ) )
# fp16 smoke test: run a half-precision forward pass and assert the output
# contains no NaNs. NOTE(review): duplicate `lowercase_` parameters
# (SyntaxError) — presumably (config, input_dict); confirm upstream.
def _snake_case ( self : Dict , lowercase_ : Dict , lowercase_ : str , ):
snake_case_ : Dict = UMTaModel(config=lowercase_ ).to(lowercase_ ).half().eval()
snake_case_ : int = model(**lowercase_ )["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(lowercase_ ).any().item() )
# Model-level test suite for UMT5 (mixes the common ModelTester mixins with
# unittest.TestCase). NOTE(review): base-class names and several local
# bindings are mangled (`snake_case_` assigned, real names like
# `config_and_inputs`/`head_masking`/`attention_names` read) — the class as
# rendered cannot run; comments below describe the evident intent.
@require_torch
class _UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase):
# All model classes under test; empty tuple when torch is unavailable.
_lowerCAmelCase : Any = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
_lowerCAmelCase : Optional[int] = (UMTaForConditionalGeneration,) if is_torch_available() else ()
# Pipeline-task -> model-class mapping used by the pipeline tests.
_lowerCAmelCase : Union[str, Any] = (
{
"conversational": UMTaForConditionalGeneration,
"feature-extraction": UMTaModel,
"summarization": UMTaForConditionalGeneration,
"text2text-generation": UMTaForConditionalGeneration,
"translation": UMTaForConditionalGeneration,
"question-answering": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
_lowerCAmelCase : Any = True
_lowerCAmelCase : Union[str, Any] = False
_lowerCAmelCase : int = False
_lowerCAmelCase : int = True
_lowerCAmelCase : str = True
# The small UMT5 model needs higher percentages for CPU/MP tests
_lowerCAmelCase : Optional[Any] = [0.8, 0.9]
def _snake_case ( self : Optional[Any] ):
snake_case_ : Tuple = UMTaModelTester(self )
# ONNX export smoke test (skipped: segfaults on torch 1.8.0).
@unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
def _snake_case ( self : Dict ):
snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs()
snake_case_ : Union[str, Any] = UMTaModel(config_and_inputs[0] ).to(lowercase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
lowercase_ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"{tmpdirname}/t5_test.onnx" , export_params=lowercase_ , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def _snake_case ( self : Optional[int] ):
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*lowercase_ )
# Head masking: zeroing an entire head-mask must zero the matching
# attention weights in the generated output.
def _snake_case ( self : List[Any] ):
snake_case_ : Any = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
snake_case_ : int = config_and_inputs[0]
snake_case_ : Tuple = UMTaForConditionalGeneration(lowercase_ ).eval()
model.to(lowercase_ )
snake_case_ : List[Any] = {
"""head_mask""": torch.zeros(config.num_layers , config.num_heads , device=lowercase_ ),
"""decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowercase_ ),
"""cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowercase_ ),
}
for attn_name, (name, mask) in zip(lowercase_ , head_masking.items() ):
snake_case_ : str = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
snake_case_ : Optional[Any] = torch.ones(
config.num_decoder_layers , config.num_heads , device=lowercase_ )
snake_case_ : Union[str, Any] = model.generate(
config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=lowercase_ , return_dict_in_generate=lowercase_ , **lowercase_ , )
# We check the state of decoder_attentions and cross_attentions just from the last step
snake_case_ : int = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
def _snake_case ( self : List[Any] ):
pass
# Slow integration test: tokenize known prompts with google/umt5-small,
# compare token ids against hard-coded expectations, then compare greedy
# generations against hard-coded decoded strings. Requires network + GPU
# weights, hence @slow; currently skipped pending a tokenizer fix upstream.
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase):
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
def _snake_case ( self : Tuple ):
snake_case_ : Tuple = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=lowercase_ ).to(lowercase_ )
snake_case_ : Dict = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=lowercase_ , legacy=lowercase_ )
snake_case_ : Tuple = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
snake_case_ : int = tokenizer(lowercase_ , return_tensors='''pt''' , padding=lowercase_ ).input_ids
# fmt: off
# Expected token ids for the five prompts above (right-padded with 0).
snake_case_ : Optional[int] = torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(lowercase_ , lowercase_ )
snake_case_ : Tuple = model.generate(input_ids.to(lowercase_ ) )
# Expected decoded generations (including pad/extra-id special tokens).
snake_case_ : Union[str, Any] = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
snake_case_ : Tuple = tokenizer.batch_decode(lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
| 264 |
'''simple docstring'''
class A__ :
    """Undirected weighted graph stored as a nested adjacency mapping.

    FIX(review): in the original chunk every method shared one mangled name
    (so later defs shadowed earlier ones), several signatures repeated the
    same parameter name (a SyntaxError), and tuple unpacking targeted a
    single throwaway name while the following lines used the real names
    (NameError at runtime). Descriptive names are restored below; the
    behaviour matches the canonical Borůvka helper this code derives from.
    """

    def __init__(self) -> None:
        # Number of distinct vertices / edges added so far.
        self.num_vertices = 0
        self.num_edges = 0
        # adjacency[u][v] == weight of edge (u, v); symmetric for both ends.
        self.adjacency = {}

    def add_vertex(self, vertex) -> None:
        """Register ``vertex``; a no-op if it is already present."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight) -> None:
        """Add an undirected weighted edge; self-loops are silently ignored."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self) -> None:
        """Rewrite edge weights in place so that all weights are distinct.

        Borůvka's algorithm assumes distinct weights; this keeps the relative
        ordering of the original weights, bumping ties by one.
        """
        edges = self.get_edges()
        # get_edges() yields each undirected edge twice (once per direction);
        # removing the reversed twin while iterating leaves one copy of each.
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        # Write the adjusted weights back into the adjacency map (both ways).
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self) -> str:
        """Render every directed edge as ``head -> tail == weight`` lines."""
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Return all (tail, head, weight) triples — each undirected edge twice."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Return a view of all vertices."""
        return self.adjacency.keys()

    @classmethod
    def build(cls, vertices=None, edges=None):
        """Construct a graph from optional vertex and (head, tail, weight) lists.

        FIX(review): made a classmethod — the original staticmethod
        instantiated an undefined module-level name.
        """
        g = cls()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
class A__ :
    """Disjoint-set (union-find) with path compression and union by rank,
    plus a Borůvka minimum-spanning-tree driver.

    FIX(review): the original chunk collapsed ``root1``/``root2`` into one
    name (making ``union`` compare a root with itself), unpacked edge tuples
    into a single throwaway name, and gave every method the same mangled
    identifier. Canonical names are restored below.
    """

    def __init__(self) -> None:
        # parent[item] -> representative chain; rank[item] -> tree-height bound.
        self.parent = {}
        self.rank = {}

    def __len__(self) -> int:
        return len(self.parent)

    def make_set(self, item):
        """Create a singleton set for ``item`` and return its representative."""
        if item in self.parent:
            return self.find(item)
        self.parent[item] = item
        self.rank[item] = 0
        return item

    def find(self, item):
        """Return the representative of ``item``'s set (path compression).

        Unknown items are lazily registered as singletons.
        """
        if item not in self.parent:
            return self.make_set(item)
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item])
        return self.parent[item]

    def union(self, item1, item2):
        """Merge the sets containing the two items; return the surviving root."""
        root1 = self.find(item1)
        root2 = self.find(item2)
        if root1 == root2:
            return root1
        if self.rank[root1] > self.rank[root2]:
            self.parent[root2] = root1
            return root1
        if self.rank[root1] < self.rank[root2]:
            self.parent[root1] = root2
            return root2
        if self.rank[root1] == self.rank[root2]:
            self.rank[root1] += 1
            self.parent[root2] = root1
            return root1
        return None

    @staticmethod
    def boruvka_mst(graph):
        """Compute a minimum spanning tree of ``graph`` via Borůvka's algorithm.

        Assumes distinct edge weights (see ``distinct_weight`` on the graph).
        """
        num_components = graph.num_vertices
        union_find = A__()
        mst_edges = []
        while num_components > 1:
            # Cheapest outgoing edge per component root (-1 == none found yet).
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            # Drop the reversed duplicate of every undirected edge.
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        # NOTE(review): `Graph` is not defined in this chunk — presumably the
        # graph class defined above it; confirm upstream before running.
        mst = Graph.build(edges=mst_edges)
        return mst
| 276 | 0 |
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-import table: submodule name -> public names it provides.
# FIX(review): the original chunk bound both the dict and the modeling list
# to one mangled name and then passed an undefined `_import_structure` to
# _LazyModule; the standard transformers lazy-module pattern is restored.
_import_structure = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: expose only the configuration symbols.
    pass
else:
    _import_structure['modeling_focalnet'] = [
        'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'FocalNetForImageClassification',
        'FocalNetForMaskedImageModeling',
        'FocalNetBackbone',
        'FocalNetModel',
        'FocalNetPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy submodules
    # are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 310 |
'''Download the og:image of a web page to the current directory.

FIX(review): the original imported BeautifulSoup from the non-existent
module ``bsa`` (should be ``bs4``) and bound every intermediate value to the
same mangled name while the following lines referenced the real names.
'''
from datetime import datetime

import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input('Enter image url: ').strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('meta', {'property': 'og:image'})['content']
    image_data = requests.get(image_url).content
    # NOTE(review): ':' in the timestamp is illegal on Windows filesystems;
    # kept as-is for parity with the original behaviour.
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, 'wb') as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
| 276 | 0 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
# Factory for the `transformers convert` CLI sub-command: builds the command
# object from parsed argparse args.
# NOTE(review): the parameter is named `snake_case` but the body reads `args`,
# and `ConvertCommand` is not defined under that name in this chunk (the
# class below is name-mangled) — bindings look broken; confirm upstream.
def A ( snake_case :Namespace ) -> Union[str, Any]:
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
# Error message shown when a conversion needs TensorFlow but it is missing.
UpperCamelCase : str = '''
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
'''
# CLI command that converts an original (mostly TensorFlow) checkpoint into a
# Transformers PyTorch checkpoint, dispatching on --model_type.
# NOTE(review): locals are mangled — values are bound to `__UpperCamelCase`
# while later lines read `train_parser`, `self._logger`, `self._model_type`,
# etc.; the class as rendered cannot run. Comments describe evident intent.
class __lowerCAmelCase ( UpperCAmelCase__ ):
@staticmethod
def UpperCAmelCase ( __UpperCAmelCase ):
'''simple docstring'''
# Register the `convert` sub-parser and its arguments on the root parser.
__UpperCamelCase = parser.add_parser(
'convert' , help='CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.' , )
train_parser.add_argument('--model_type' , type=__UpperCAmelCase , required=__UpperCAmelCase , help='Model\'s type.' )
train_parser.add_argument(
'--tf_checkpoint' , type=__UpperCAmelCase , required=__UpperCAmelCase , help='TensorFlow checkpoint path or folder.' )
train_parser.add_argument(
'--pytorch_dump_output' , type=__UpperCAmelCase , required=__UpperCAmelCase , help='Path to the PyTorch saved model output.' )
train_parser.add_argument('--config' , type=__UpperCAmelCase , default='' , help='Configuration file path or folder.' )
train_parser.add_argument(
'--finetuning_task_name' , type=__UpperCAmelCase , default=__UpperCAmelCase , help='Optional fine-tuning task name if the TF model was a finetuned model.' , )
train_parser.set_defaults(func=__UpperCAmelCase )
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , ):
'''simple docstring'''
# Store the CLI arguments on the instance for run() to use.
__UpperCamelCase = logging.get_logger('transformers-cli/converting' )
self._logger.info(F'Loading model {model_type}' )
__UpperCamelCase = model_type
__UpperCamelCase = tf_checkpoint
__UpperCamelCase = pytorch_dump_output
__UpperCamelCase = config
__UpperCamelCase = finetuning_task_name
def UpperCAmelCase ( self ):
'''simple docstring'''
# Dispatch on the requested model type; each branch lazily imports its
# converter so TensorFlow is only required for the types that need it.
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__UpperCAmelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__UpperCAmelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__UpperCAmelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(__UpperCAmelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__UpperCAmelCase )
# transfo_xl accepts either a raw TF checkpoint or a pickled dataset;
# decide by the file name which positional slot the path fills.
if "ckpt" in self._tf_checkpoint.lower():
__UpperCamelCase = self._tf_checkpoint
__UpperCamelCase = """"""
else:
__UpperCamelCase = self._tf_checkpoint
__UpperCamelCase = """"""
convert_transfo_xl_checkpoint_to_pytorch(
__UpperCAmelCase , self._config , self._pytorch_dump_output , __UpperCAmelCase )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__UpperCAmelCase )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__UpperCAmelCase )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
'--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]' )
| 316 |
'''simple docstring'''
# Notebook bootstrap snippet (Italian docs): pip-install cell injected at the
# top of generated documentation notebooks.
# NOTE(review): all three constants below are bound to the same mangled name
# `A__` (each shadows the previous) — upstream they are INSTALL_CONTENT,
# notebook_first_cells and black_avoid_patterns; confirm before use.
A__: Tuple = '''
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
A__: Tuple = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
# Doc-builder replacement patterns that black formatting must not touch.
A__: Any = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 276 | 0 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    """Download and return the LAVIS demo image as an RGB PIL image.

    FIX(review): the original bound the URL to a mangled local and then
    passed a different, undefined name to ``requests.get``; it also lost the
    ``stream=True`` flag needed for ``response.raw`` to be readable. The
    function is restored under the name its call site below uses.
    """
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    # stream=True keeps the body unconsumed so PIL can read response.raw.
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    """Build the (old_key, new_key) pairs mapping LAVIS checkpoint names to
    HF InstructBLIP names.

    FIX(review): the original assigned the accumulator to a mangled name but
    appended to the undefined ``rename_keys`` (NameError); the function is
    restored under the name its call site below uses.

    Args:
        config: an InstructBlipConfig-like object; only
            ``config.vision_config.num_hidden_layers`` is read.

    Returns:
        list[tuple[str, str]] of rename pairs.
    """
    rename_keys = []
    # fmt: off
    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias"))
    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place.

    FIX(review): the original signature repeated ``UpperCamelCase_`` three
    times (a SyntaxError); the function is restored under the name its call
    site below uses.
    """
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    """Fuse the per-layer q and v biases into a single qkv bias in place.

    The original ViT checkpoint stores separate ``q_bias``/``v_bias`` (the k
    bias is implicitly zero); the HF model expects one concatenated
    ``qkv.bias``. FIX(review): the original signature duplicated a parameter
    name (SyntaxError) and lost the local names; restored under the name the
    call site below uses.

    Args:
        state_dict: checkpoint dict, modified in place.
        config: object exposing ``vision_config.num_hidden_layers``.
    """
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict: [q | zeros-for-k | v]
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blipa_config(model_name):
    """Build the InstructBLIP config matching ``model_name``.

    FIX(review): the original bound every value to a mangled local while the
    following lines read ``image_size``/``vision_config``/etc. (NameError);
    restored under the name the call site below uses.

    Args:
        model_name: one of the instructblip checkpoint names; "coco" variants
            use a larger 364px image size.

    Returns:
        (InstructBlipConfig, image_size) tuple.

    Raises:
        ValueError: for unrecognized model names.
    """
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32_001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32_001).to_dict()
    else:
        raise ValueError("Model name not supported")

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30_523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size
# End-to-end conversion driver: load the original LAVIS InstructBLIP model,
# rename/reshape its weights into the HF layout, verify logits and a sample
# generation match, then optionally save / push to the Hub. Requires network
# access and (ideally) multiple GPUs.
# NOTE(review): locals are mangled throughout — values bound to
# `lowerCAmelCase__` while later lines read `tokenizer`, `config`,
# `hf_model`, `state_dict`, etc.; the function as rendered cannot run.
@torch.no_grad()
def _a ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Any=None , UpperCamelCase_ : List[str]=False ) -> Dict:
    """simple docstring"""
    lowerCAmelCase__ = AutoTokenizer.from_pretrained("bert-base-uncased" , truncation_side="left" )
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"} )
    if "t5" in model_name:
        lowerCAmelCase__ = TaTokenizerFast.from_pretrained("google/flan-t5-xl" , truncation_side="left" )
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        lowerCAmelCase__ = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b" , truncation_side="left" , bos_token="</s>" , unk_token="</s>" )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"} )
    lowerCAmelCase__ = get_blipa_config(_UpperCAmelCase )
    lowerCAmelCase__ = InstructBlipForConditionalGeneration(_UpperCAmelCase ).eval()
    # Map HF model names to the (lavis_name, lavis_model_type) pair to load.
    lowerCAmelCase__ = {
        """instructblip-vicuna-7b""": ("""blip2_vicuna_instruct""", """vicuna7b"""),
        """instructblip-vicuna-13b""": ("""blip2_vicuna_instruct""", """vicuna13b"""),
        """instructblip-flan-t5-xl""": ("""blip2_t5_instruct""", """flant5xl"""),
        """instructblip-flan-t5-xxl""": ("""blip2_t5_instruct""", """flant5xxl"""),
    }
    lowerCAmelCase__ = model_name_to_original[model_name]
    # load original model
    print("Loading original model..." )
    # Original and HF copies on separate devices so both fit in memory.
    lowerCAmelCase__ = """cuda:1""" if torch.cuda.is_available() else """cpu"""
    lowerCAmelCase__ = """cuda:2""" if torch.cuda.is_available() else """cpu"""
    lowerCAmelCase__ = load_model_and_preprocess(
        name=_UpperCAmelCase , model_type=_UpperCAmelCase , is_eval=_UpperCAmelCase , device=_UpperCAmelCase )
    original_model.eval()
    print("Done!" )
    # update state dict keys
    lowerCAmelCase__ = original_model.state_dict()
    lowerCAmelCase__ = create_rename_keys(_UpperCAmelCase )
    for src, dest in rename_keys:
        rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        lowerCAmelCase__ = state_dict.pop(_UpperCAmelCase )
        if key.startswith("Qformer.bert" ):
            lowerCAmelCase__ = key.replace("Qformer.bert" , "qformer" )
        if "attention.self" in key:
            lowerCAmelCase__ = key.replace("self" , "attention" )
        if "llm_proj" in key:
            lowerCAmelCase__ = key.replace("llm_proj" , "language_projection" )
        if "t5_proj" in key:
            lowerCAmelCase__ = key.replace("t5_proj" , "language_projection" )
        if key.startswith("llm_model" ):
            lowerCAmelCase__ = key.replace("llm_model" , "language_model" )
        if key.startswith("t5" ):
            lowerCAmelCase__ = key.replace("t5" , "language" )
        lowerCAmelCase__ = val
    # read in qv biases
    read_in_q_v_bias(_UpperCAmelCase , _UpperCAmelCase )
    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
    lowerCAmelCase__ = load_demo_image()
    lowerCAmelCase__ = """What is unusual about this image?"""
    # create processor
    lowerCAmelCase__ = BlipImageProcessor(
        size={"height": image_size, "width": image_size} , image_mean=_UpperCAmelCase , image_std=_UpperCAmelCase )
    lowerCAmelCase__ = InstructBlipProcessor(
        image_processor=_UpperCAmelCase , tokenizer=_UpperCAmelCase , qformer_tokenizer=_UpperCAmelCase , )
    lowerCAmelCase__ = processor(images=_UpperCAmelCase , text=_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
    # make sure processor creates exact same pixel values
    lowerCAmelCase__ = vis_processors["""eval"""](_UpperCAmelCase ).unsqueeze(0 ).to(_UpperCAmelCase )
    lowerCAmelCase__ = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device ) , _UpperCAmelCase )
    original_model.to(_UpperCAmelCase )
    hf_model.to(_UpperCAmelCase )
    with torch.no_grad():
        # vicuna is decoder-only; t5 needs labels for a decoder pass.
        if "vicuna" in model_name:
            lowerCAmelCase__ = original_model({"image": original_pixel_values, "text_input": [prompt]} ).logits
            lowerCAmelCase__ = hf_model(**_UpperCAmelCase ).logits
        else:
            lowerCAmelCase__ = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]} ).logits
            lowerCAmelCase__ = tokenizer("\n" , return_tensors="pt" ).input_ids.to(_UpperCAmelCase )
            lowerCAmelCase__ = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 )
            lowerCAmelCase__ = hf_model(**_UpperCAmelCase , labels=_UpperCAmelCase ).logits
    print("First values of original logits:" , original_logits[0, :3, :3] )
    print("First values of HF logits:" , logits[0, :3, :3] )
    # assert values
    assert original_logits.shape == logits.shape
    lowerCAmelCase__ = 1e-4 if """vicuna""" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device ) , _UpperCAmelCase , atol=_UpperCAmelCase )
    print("Looks ok!" )
    print("Generating with original model..." )
    lowerCAmelCase__ = original_model.generate({"image": original_pixel_values, "prompt": prompt} , num_beams=5 )
    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model..." )
    lowerCAmelCase__ = hf_model.generate(
        **_UpperCAmelCase , do_sample=_UpperCAmelCase , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        lowerCAmelCase__ = 2
    print("Original generation:" , _UpperCAmelCase )
    lowerCAmelCase__ = processor.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
    lowerCAmelCase__ = [text.strip() for text in output_text]
    print("HF generation:" , _UpperCAmelCase )
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(_UpperCAmelCase )
        hf_model.save_pretrained(_UpperCAmelCase )
    if push_to_hub:
        processor.push_to_hub(F"Salesforce/{model_name}" )
        hf_model.push_to_hub(F"Salesforce/{model_name}" )
# CLI entry point: parse the model name / output folder / push flag and run
# the conversion. NOTE(review): `a_` is rebound three times (parser, choices
# list, parsed args) and `convert_blipa_checkpoint` is not defined under that
# name in this chunk (the function above is name-mangled) — confirm upstream.
if __name__ == "__main__":
    a_ = argparse.ArgumentParser()
    # Supported checkpoint names for --model_name.
    a_ = [
        '''instructblip-vicuna-7b''',
        '''instructblip-vicuna-13b''',
        '''instructblip-flan-t5-xl''',
        '''instructblip-flan-t5-xxl''',
    ]
    parser.add_argument(
        '''--model_name''',
        default='''instructblip-flan-t5-xl''',
        choices=choices,
        type=str,
        help='''Path to hf config.json of model to convert''',
    )
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument(
        '''--push_to_hub''',
        action='''store_true''',
        help='''Whether to push the model and processor to the hub after converting''',
    )
    a_ = parser.parse_args()
    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 340 |
'''simple docstring'''
A__: Optional[int] = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
A__: Any = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
A__: int = {
0: '''Sunday''',
1: '''Monday''',
2: '''Tuesday''',
3: '''Wednesday''',
4: '''Thursday''',
5: '''Friday''',
6: '''Saturday''',
}
def get_week_day(year: int, month: int, day: int) -> str:
    """Return the English weekday name of a Gregorian date (Conway's Doomsday rule).

    >>> get_week_day(2000, 1, 1)
    'Saturday'
    """
    # Minimal sanity checks on the input date (kept as asserts to preserve the
    # original error type callers may rely on).
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday day-of-month per month; kept local so the function is self-contained.
    doomsday_leap = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
    doomsday_not_leap = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
    week_day_names = {
        0: "Sunday",
        1: "Monday",
        2: "Tuesday",
        3: "Wednesday",
        4: "Thursday",
        5: "Friday",
        6: "Saturday",
    }
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # A year is NOT leap when it is not divisible by 4, or it is a century year
    # not divisible by 400.  (The original tested `year % 400 == 0` here, which
    # wrongly treated years like 2000 as non-leap.)
    day_anchor = (
        doomsday_not_leap[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else doomsday_leap[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return week_day_names[week_day]
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 276 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Make torch/cudnn deterministic so pipeline outputs are reproducible across runs.
enable_full_determinism()
class KandinskyImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast (CPU, tiny-model) tests for the Kandinsky image-to-image pipeline."""

    pipeline_class = KandinskyImgaImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    # xformers attention is not exercised by these fast CPU tests.
    test_xformers_attention_forwardGenerator_pass = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        """Build the tiny component set the pipeline is instantiated from."""
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline inputs for the given device and seed."""
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        # np.uint8 (the original had garbled "np.uinta", which does not exist in numpy)
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImgaImgPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test against the released Kandinsky 2.1 checkpoints."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        # torch.float16 (the original had garbled "torch.floataa", which does not exist)
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_embeds, negative_image_embeds = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_embeds,
            negative_image_embeds=negative_image_embeds,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    """Result of a Burrows-Wheeler transform."""

    # Last column of the sorted rotation matrix.
    bwt_string: str
    # Index of the original string among the sorted rotations (needed to invert).
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    """Return every cyclic rotation of *s*, one per starting offset.

    Raises TypeError if *s* is not a string.
    """
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]
def bwt_transform(s: str) -> BWTTransformDict:
    """Compute the Burrows-Wheeler transform of *s*.

    Returns a dict with the BWT string and the index of the original string
    among the sorted rotations. Raises TypeError/ValueError on bad input.
    """
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Invert a Burrows-Wheeler transform.

    Rebuilds the sorted rotation matrix column by column and returns the row
    at *idx_original_string*. Raises TypeError/ValueError on bad input.
    """
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string)."
        )

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        # Prepend one more character to every partial rotation, then re-sort:
        # after len(bwt_string) passes the rows equal the sorted rotations.
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    # Interactive demo: transform a user-provided string and invert it again.
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
| 276 | 0 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the tiny sentencepiece model fixture used to build test tokenizers.
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer test suite for Reformer (slow sentencepiece + fast tokenizer)."""

    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # Save a tiny sentencepiece tokenizer so the mixin can reload it from disk.
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Round-trip `_convert_token_to_id` and `_convert_id_to_token`."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # The tokenizer has no padding token, so padding must raise.
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        # Intentionally a no-op: this tokenizer has no padding token.
        pass

    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # Characters missing from the vocab ("9", "é") come back as "<unk>".
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [
            108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268,
            21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319,
            258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259,
            241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26,
            0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265,
        ]
        # fmt: on

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch
        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
| 124 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import table: maps submodule name -> public symbols it provides.
_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Vision-dependent symbols are only registered when PIL/vision is installed.
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_chinese_clip"] = [
        "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ChineseCLIPModel",
        "ChineseCLIPPreTrainedModel",
        "ChineseCLIPTextModel",
        "ChineseCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy loader so heavy imports happen on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 276 | 0 |
class TrieNode:
    """A node of a prefix tree (trie) over single characters."""

    def __init__(self) -> None:
        self.nodes: dict = {}  # Mapping from char to TrieNode
        self.is_leaf = False  # True if a complete word ends at this node

    def insert_many(self, words: list) -> None:
        """Insert every word of *words* into the trie."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a single word, creating child nodes as needed."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Return True iff *word* was previously inserted (exact match)."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Remove *word* from the trie, pruning child nodes that become unused."""

        def _delete(curr: "TrieNode", word: str, index: int) -> bool:
            # Returns True when *curr* has no children left and may be pruned.
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
def print_words(node: "TrieNode", word: str) -> None:
    """Depth-first print of every word stored under *node*, prefixed by *word*."""
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)
def test_trie() -> bool:
    """Exercise insert/find/delete on a small word set; return True if all pass."""
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True
def print_results(msg: str, passes: bool) -> None:
    """Print *msg* followed by a pass/fail tag."""
    print(str(msg), "works!" if passes else "doesn't work :(")
def pytests() -> None:
    """Entry point for pytest-style invocation of the trie self-test."""
    assert test_trie()
def main() -> None:
    """Run the trie self-test and report the result."""
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
| 338 |
'''simple docstring'''
class TrieNode:
    """Character-level prefix-tree node supporting insert, lookup and delete."""

    def __init__(self) -> None:
        # Mapping from char to TrieNode
        self.nodes: dict = {}
        # Marks the end of a stored word.
        self.is_leaf = False

    def insert_many(self, words: list) -> None:
        """Insert each word in *words*."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Add *word* to the trie, allocating nodes along the path as needed."""
        node = self
        for char in word:
            if char not in node.nodes:
                node.nodes[char] = TrieNode()
            node = node.nodes[char]
        node.is_leaf = True

    def find(self, word: str) -> bool:
        """Return whether *word* (as a whole word, not a prefix) is stored."""
        node = self
        for char in word:
            if char not in node.nodes:
                return False
            node = node.nodes[char]
        return node.is_leaf

    def delete(self, word: str) -> None:
        """Delete *word* and prune any nodes left without descendants."""

        def _delete(curr: "TrieNode", word: str, index: int) -> bool:
            # Returns True if *curr* can be removed from its parent afterwards.
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
def print_words(node: "TrieNode", word: str) -> None:
    """Recursively print all words reachable from *node* with prefix *word*."""
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)
def test_trie() -> bool:
    """Self-test covering insertion, exact lookup, and deletion semantics."""
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True
def print_results(msg: str, passes: bool) -> None:
    """Report a test outcome: *msg* plus a human-readable pass/fail suffix."""
    print(str(msg), "works!" if passes else "doesn't work :(")
def pytests() -> None:
    """Assert-based wrapper so the self-test can run under pytest."""
    assert test_trie()
def main() -> None:
    """Script entry point: run the self-test and print the verdict."""
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
| 276 | 0 |
def z_function(input_str: str) -> list[int]:
    """Return the Z-array of *input_str*.

    z_result[i] is the length of the longest substring starting at i that is
    also a prefix of the string (z_result[0] is left at 0 by convention here).
    Runs in O(n) using the standard left/right interval optimization.
    """
    z_result = [0 for i in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    """Return True while the prefix match at position *i* can be extended by one char."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """Count occurrences of *pattern* in *input_str* via the Z-function."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if value is greater then length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1

    return answer
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 92 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy-import table: maps submodule name -> public symbols it provides.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Tokenizer is only importable when sentencepiece is installed.
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    # Replace this module with a lazy loader so heavy imports happen on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 276 | 0 |
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """Logger adapter that is aware of multi-process execution.

    `log` accepts two extra kwargs: `main_process_only` (log only on the main
    process) and `in_order` (each process logs in rank order).
    """

    @staticmethod
    def _should_log(main_process_only):
        """Return True when this process is allowed to emit the record."""
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        """Delegate to the wrapped logger after the multi-process checks.

        Raises RuntimeError if the accelerate state was never initialized.
        """
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        # Defaults follow upstream accelerate: main-process-only, unordered.
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)

            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    # Keep ranks in lockstep so output stays ordered.
                    state.wait_for_everyone()
def get_logger(name: str, log_level: str = None):
    """Return a multiprocess-aware logger adapter for *name*.

    If *log_level* is not given, it falls back to the ACCELERATE_LOG_LEVEL
    environment variable; when set, it is applied to the logger and the root.
    """
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
| 192 |
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
# Module-level logger (conventional name; the original bound it to a throwaway `A__`).
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fpaa_statistics=None):
    """
    Set a given tensor (parameter or buffer) of a module on a specific device, quantizing it with
    `bitsandbytes` when the target parameter is already an `Int8Params`/`Params4bit`.

    Note that doing `param.to(device)` creates a new tensor not linked to the parameter, which is
    why this helper writes back into `module._parameters` / `module._buffers`.

    Args:
        module (`torch.nn.Module`): The module in which the tensor lives.
        tensor_name (`str`): Possibly dotted name of the parameter/buffer to set.
        device (`int`, `str` or `torch.device`): The device to set the tensor on.
        value (`torch.Tensor`, *optional*): New value; required when leaving the meta device.
        fpaa_statistics (*optional*): Pre-computed int8 statistics attached as `weight.SCB`.

    Renamed from a colliding placeholder (six functions in this module shared one name) to the
    name the deprecation wrapper below already calls; garbled local bindings restored.
    """
    # Recurse through dotted names to reach the owning submodule.
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        # `Params4bit` only exists in recent bitsandbytes releases, hence the hasattr guard.
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    # int8 state dicts are only reloadable with bitsandbytes > 0.37.2.
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2")
                    if not is_8bit_serializable:
                        raise ValueError(
                            """Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. """
                            """Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.""" )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, ConvaD) and fpaa_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fpaa_statistics is not None:
                setattr(module.weight, "SCB", fpaa_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False):
    """
    Private recursive helper for `replace_with_bnb_linear`.

    Returns the (in-place modified) model together with a boolean telling whether at least one
    `nn.Linear`/`ConvaD` submodule was swapped for a bitsandbytes quantized linear layer.

    Renamed from a colliding placeholder to the name its own recursive call (and the public
    entry point below) already use; garbled local bindings restored.
    """
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, ConvaD)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, ConvaD):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_inta_has_fpaa_weight,
                            threshold=quantization_config.llm_inta_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_inta_skip_modules is not None
                            and name in quantization_config.llm_inta_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_abit_compute_dtype,
                                compress_statistics=quantization_config.bnb_abit_use_double_quant,
                                quant_type=quantization_config.bnb_abit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    """
    Replace all `nn.Linear` (and `ConvaD`) submodules of `model` with bitsandbytes quantized
    linear layers, as described by `quantization_config`.

    Args:
        model: The model to convert in place.
        modules_to_not_convert (*optional*): Module names to keep in full precision;
            defaults to `["lm_head"]` for numerical-stability reasons.
        current_key_name (*optional*): Internal recursion state, leave as `None`.
        quantization_config: A `BitsAndBytesConfig`-like object.

    Renamed from a colliding placeholder to the name the deprecation wrapper below already calls.
    """
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )
    if not has_been_replaced:
        logger.warning(
            """You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
            """ Please double check your model architecture, or submit an issue on github if you think this is"""
            """ a bug.""" )
    return model
def replace_8bit_linear(*args, **kwargs):
    """Deprecated alias of `replace_with_bnb_linear`, kept for backward compatibility."""
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)
def set_module_8bit_tensor_to_device(*args, **kwargs):
    """Deprecated alias of `set_module_quantized_tensor_to_device`, kept for backward compatibility."""
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    """
    Return the module names that should be kept in full precision (e.g. a tied/untied `lm_head`
    of a CausalLM model, for numerical stability).

    Args:
        model: The model to inspect (not modified; a deepcopy is used for the tie check).
    """
    # Create a copy of the model and tie the weights, then check if it contains tied weights.
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
| 276 | 0 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class A__ ( unittest.TestCase ):
    """AutoTokenizer test suite (loading, registration, remote code, caching).

    NOTE(review): every method body references the undefined name ``A_`` (the
    original call arguments were lost in an automated rename), and all methods
    share the name ``__UpperCamelCase`` so only the last definition survives on
    the class. Code is left byte-identical; only docstrings/comments added.
    """
    def __UpperCamelCase( self ):
        """Per-test setup (assigns a throwaway counter)."""
        UpperCamelCase : Optional[int] = 0
    @slow
    def __UpperCamelCase( self ):
        """Smoke-test loading every (non-Japanese) BERT and every GPT-2 archive checkpoint."""
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            UpperCamelCase : int = AutoTokenizer.from_pretrained(A_ )
            self.assertIsNotNone(A_ )
            self.assertIsInstance(A_ , (BertTokenizer, BertTokenizerFast) )
            self.assertGreater(len(A_ ) , 0 )
        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            UpperCamelCase : List[str] = AutoTokenizer.from_pretrained(A_ )
            self.assertIsNotNone(A_ )
            self.assertIsInstance(A_ , (GPTaTokenizer, GPTaTokenizerFast) )
            self.assertGreater(len(A_ ) , 0 )
    def __UpperCamelCase( self ):
        """A small BERT checkpoint loads as a (fast) BERT tokenizer with a 12-token vocab."""
        UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(A_ )
        self.assertIsInstance(A_ , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )
    def __UpperCamelCase( self ):
        """A dummy RoBERTa checkpoint loads as a RoBERTa tokenizer with a 20-token vocab."""
        UpperCamelCase : Dict = AutoTokenizer.from_pretrained(A_ )
        self.assertIsInstance(A_ , (RobertaTokenizer, RobertaTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 20 )
    def __UpperCamelCase( self ):
        """Passing an explicit config selects the tokenizer class from that config."""
        UpperCamelCase : Any = AutoConfig.from_pretrained(A_ )
        self.assertIsInstance(A_ , A_ )
        # Check that tokenizer_type ≠ model_type
        UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(A_ , config=A_ )
        self.assertIsInstance(A_ , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )
    def __UpperCamelCase( self ):
        """`tokenizer_type=` loads from bare vocab/merges fixture files (slow variants)."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(A_ , "vocab.txt" ) )
            UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained(A_ , tokenizer_type="bert" , use_fast=A_ )
            self.assertIsInstance(A_ , A_ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json" , os.path.join(A_ , "vocab.json" ) )
            shutil.copy("./tests/fixtures/merges.txt" , os.path.join(A_ , "merges.txt" ) )
            UpperCamelCase : int = AutoTokenizer.from_pretrained(A_ , tokenizer_type="gpt2" , use_fast=A_ )
            self.assertIsInstance(A_ , A_ )
    @require_tokenizers
    def __UpperCamelCase( self ):
        """`tokenizer_type=` loads from bare vocab/merges fixture files (fast variants)."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(A_ , "vocab.txt" ) )
            UpperCamelCase : int = AutoTokenizer.from_pretrained(A_ , tokenizer_type="bert" )
            self.assertIsInstance(A_ , A_ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json" , os.path.join(A_ , "vocab.json" ) )
            shutil.copy("./tests/fixtures/merges.txt" , os.path.join(A_ , "merges.txt" ) )
            UpperCamelCase : Optional[int] = AutoTokenizer.from_pretrained(A_ , tokenizer_type="gpt2" )
            self.assertIsInstance(A_ , A_ )
    def __UpperCamelCase( self ):
        """An unknown `tokenizer_type` raises."""
        with pytest.raises(A_ ):
            AutoTokenizer.from_pretrained("./" , tokenizer_type="xxx" )
    @require_tokenizers
    def __UpperCamelCase( self ):
        """Casing metadata is honoured for a cased community checkpoint."""
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            UpperCamelCase : Optional[int] = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased" )
            self.assertIsInstance(A_ , (BertTokenizer, BertTokenizerFast) )
            if isinstance(A_ , A_ ):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , A_ )
            else:
                self.assertEqual(tokenizer.do_lower_case , A_ )
            self.assertEqual(tokenizer.model_max_length , 512 )
    @require_tokenizers
    def __UpperCamelCase( self ):
        """A non-existent model identifier produces a helpful error message."""
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                A_ , "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier" , ):
                UpperCamelCase : Any = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists" )
    def __UpperCamelCase( self ):
        """Every tokenizer class registered in TOKENIZER_MAPPING is resolvable by name."""
        UpperCamelCase : Dict = TOKENIZER_MAPPING.values()
        UpperCamelCase : Tuple = []
        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__ )
            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__ )
        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(A_ )
    @require_tokenizers
    def __UpperCamelCase( self ):
        """`use_fast` toggles between slow and fast tokenizer classes."""
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" , use_fast=A_ ) , A_ )
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" ) , A_ )
    @require_tokenizers
    def __UpperCamelCase( self ):
        """With lowercasing disabled, cased words fall to [UNK] for uncased vocabs."""
        UpperCamelCase : List[str] = AutoTokenizer.from_pretrained("distilbert-base-uncased" , do_lower_case=A_ )
        UpperCamelCase : List[Any] = """Hello, world. How are you?"""
        UpperCamelCase : Optional[int] = tokenizer.tokenize(A_ )
        self.assertEqual("[UNK]" , tokens[0] )
        UpperCamelCase : Optional[int] = AutoTokenizer.from_pretrained("microsoft/mpnet-base" , do_lower_case=A_ )
        UpperCamelCase : int = tokenizer.tokenize(A_ )
        self.assertEqual("[UNK]" , tokens[0] )
    @require_tokenizers
    def __UpperCamelCase( self ):
        """A fast-tokenizer repo with a model config loads with the expected attributes."""
        UpperCamelCase : Optional[int] = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config" )
        self.assertEqual(type(A_ ) , A_ )
        self.assertEqual(tokenizer.model_max_length , 512 )
        self.assertEqual(tokenizer.vocab_size , 3_0000 )
        self.assertEqual(tokenizer.unk_token , "[UNK]" )
        self.assertEqual(tokenizer.padding_side , "right" )
        self.assertEqual(tokenizer.truncation_side , "right" )
    def __UpperCamelCase( self ):
        """save_pretrained/from_pretrained round-trips the tokenizer class and vocab."""
        UpperCamelCase : int = AutoTokenizer.from_pretrained(A_ )
        self.assertIsInstance(A_ , (BertTokenizer, BertTokenizerFast) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(A_ )
            UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(A_ )
        self.assertIsInstance(A_ , tokenizer.__class__ )
        self.assertEqual(tokenizera.vocab_size , 12 )
    def __UpperCamelCase( self ):
        """CTRL has no fast tokenizer, so the slow class is returned."""
        UpperCamelCase : str = AutoTokenizer.from_pretrained("ctrl" )
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(A_ , A_ )
    def __UpperCamelCase( self ):
        """get_tokenizer_config returns the saved config (or an empty dict when absent)."""
        UpperCamelCase : Optional[int] = get_tokenizer_config("bert-base-cased" )
        UpperCamelCase : Any = config.pop("_commit_hash" , A_ )
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(A_ , {"do_lower_case": False} )
        # This model does not have a tokenizer_config so we get back an empty dict.
        UpperCamelCase : List[str] = get_tokenizer_config(A_ )
        self.assertDictEqual(A_ , {} )
        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        UpperCamelCase : str = AutoTokenizer.from_pretrained(A_ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(A_ )
            UpperCamelCase : Any = get_tokenizer_config(A_ )
        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"] , "BertTokenizer" )
    def __UpperCamelCase( self ):
        """Custom tokenizers can be registered; re-registering existing ones raises."""
        try:
            AutoConfig.register("custom" , A_ )
            AutoTokenizer.register(A_ , slow_tokenizer_class=A_ )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(A_ ):
                AutoTokenizer.register(A_ , slow_tokenizer_class=A_ )
            UpperCamelCase : List[str] = CustomTokenizer.from_pretrained(A_ )
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(A_ )
            UpperCamelCase : Any = AutoTokenizer.from_pretrained(A_ )
            self.assertIsInstance(A_ , A_ )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    @require_tokenizers
    def __UpperCamelCase( self ):
        """Slow and fast custom tokenizers can be registered separately or together."""
        try:
            AutoConfig.register("custom" , A_ )
            # Can register in two steps
            AutoTokenizer.register(A_ , slow_tokenizer_class=A_ )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
            AutoTokenizer.register(A_ , fast_tokenizer_class=A_ )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                A_ , slow_tokenizer_class=A_ , fast_tokenizer_class=A_ )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(A_ ):
                AutoTokenizer.register(A_ , fast_tokenizer_class=A_ )
            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new tokenizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                UpperCamelCase : int = BertTokenizerFast.from_pretrained(A_ )
                bert_tokenizer.save_pretrained(A_ )
                UpperCamelCase : int = CustomTokenizerFast.from_pretrained(A_ )
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(A_ )
                UpperCamelCase : Tuple = AutoTokenizer.from_pretrained(A_ )
                self.assertIsInstance(A_ , A_ )
                UpperCamelCase : Any = AutoTokenizer.from_pretrained(A_ , use_fast=A_ )
                self.assertIsInstance(A_ , A_ )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def __UpperCamelCase( self ):
        """Remote custom tokenizers require trust_remote_code and survive save/reload."""
        with self.assertRaises(A_ ):
            UpperCamelCase : Dict = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(A_ ):
            UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=A_ )
        UpperCamelCase : Any = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=A_ )
        self.assertTrue(tokenizer.special_attribute_present )
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(A_ )
            UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(A_ , trust_remote_code=A_ )
        self.assertTrue(reloaded_tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizerFast" )
            # Test we can also load the slow version
            UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=A_ , use_fast=A_ )
            self.assertTrue(tokenizer.special_attribute_present )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(A_ )
                UpperCamelCase : Tuple = AutoTokenizer.from_pretrained(A_ , trust_remote_code=A_ , use_fast=A_ )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )
            self.assertTrue(reloaded_tokenizer.special_attribute_present )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )
    @require_tokenizers
    def __UpperCamelCase( self ):
        """Locally registered classes win over remote code unless trust_remote_code is enabled."""
        class A__ ( UpperCAmelCase__ ):
            _UpperCAmelCase :str = False
        class A__ ( UpperCAmelCase__ ):
            _UpperCAmelCase :List[str] = NewTokenizer
            _UpperCAmelCase :Tuple = False
        try:
            AutoConfig.register("custom" , A_ )
            AutoTokenizer.register(A_ , slow_tokenizer_class=A_ )
            AutoTokenizer.register(A_ , fast_tokenizer_class=A_ )
            # If remote code is not set, the default is to use local
            UpperCamelCase : str = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
            self.assertFalse(tokenizer.special_attribute_present )
            UpperCamelCase : Optional[int] = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , use_fast=A_ )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
            self.assertFalse(tokenizer.special_attribute_present )
            # If remote code is disabled, we load the local one.
            UpperCamelCase : Tuple = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=A_ )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
            self.assertFalse(tokenizer.special_attribute_present )
            UpperCamelCase : List[str] = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=A_ , use_fast=A_ )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
            self.assertFalse(tokenizer.special_attribute_present )
            # If remote is enabled, we load from the Hub
            UpperCamelCase : Optional[int] = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=A_ )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
            self.assertTrue(tokenizer.special_attribute_present )
            UpperCamelCase : Dict = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=A_ , use_fast=A_ )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
            self.assertTrue(tokenizer.special_attribute_present )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def __UpperCamelCase( self ):
        """Legacy remote-code repos (tokenizer class named in tokenizer_config) still load."""
        UpperCamelCase : List[str] = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=A_ )
        self.assertTrue(tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
            # Test we can also load the slow version
            UpperCamelCase : str = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=A_ , use_fast=A_ )
            self.assertTrue(tokenizer.special_attribute_present )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
    def __UpperCamelCase( self ):
        """A malformed repo id yields a clear error."""
        with self.assertRaisesRegex(
            A_ , "bert-base is not a local folder and is not a valid model identifier" ):
            UpperCamelCase : Dict = AutoTokenizer.from_pretrained("bert-base" )
    def __UpperCamelCase( self ):
        """An unknown revision yields a clear error."""
        with self.assertRaisesRegex(
            A_ , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
            UpperCamelCase : int = AutoTokenizer.from_pretrained(A_ , revision="aaaaaa" )
    def __UpperCamelCase( self ):
        """A second from_pretrained on a cached model only issues a HEAD request."""
        UpperCamelCase : int = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
        with RequestCounter() as counter:
            UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
            self.assertEqual(counter.get_request_count , 0 )
            self.assertEqual(counter.head_request_count , 1 )
            self.assertEqual(counter.other_request_count , 0 )
| 52 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
# Module-level logger shared by the token-classification dataset helpers below.
# NOTE(review): the `int` annotation is inaccurate (this is a Logger), left as-is.
A__: int = logging.getLogger(__name__)
@dataclass
class InputExample:
    """
    A single training/test example for token classification.

    Args:
        guid: Unique id for the example.
        words: The words of the sequence.
        labels: The labels for each word of the sequence. This should be specified for
            train and dev examples, but not for test examples.

    Previously all three fields were named ``__UpperCamelCase`` (collapsing into one
    annotation) and the class shared the name ``A__`` with several other classes;
    field names restored to the names this file already reads (``example.guid``,
    ``example.words``, ``example.labels``).
    """

    guid: str
    words: List[str]
    labels: Optional[List[str]]
@dataclass
class InputFeatures:
    """
    A single set of features of data; property names mirror the corresponding model
    input names (see the ``InputFeatures(input_ids=..., attention_mask=..., ...)``
    construction in `convert_examples_to_features`).
    """

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None
class Split(Enum):
    """Dataset partition; ``.value`` is the split name used in cache file names.

    Previously all three members were named ``__UpperCamelCase`` and the base class
    was the undefined ``UpperCAmelCase__``; member names restored to the names the
    rest of this file already uses (``Split.train`` default, ``mode.value``).
    """

    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    """Interface for a token-classification task (e.g. NER, POS tagging).

    Subclasses implement `read_examples_from_file` and `get_labels`; the shared
    `convert_examples_to_features` turns `InputExample`s into model-ready
    `InputFeatures`. Method names restored from colliding ``__UpperCAmelCase``
    placeholders to the names callers in this file already use.
    """

    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        """Read the raw examples for split `mode` from `data_dir`."""
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        """Return the full list of labels for the task."""
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-1_0_0,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        """Tokenize `examples` and pad/truncate them into fixed-length `InputFeatures`.

        Only the first sub-token of each word keeps its real label id; the rest get
        `pad_token_label_id` so the loss ignores them.
        """
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 1_0_0_0_0 == 0:
                logger.info("""Writing example %d of %d""", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # Follow the BERT convention: [CLS] tokens... [SEP], with segment ids
            # marking the (single) sequence; [CLS] goes last for xlnet-style models.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("""*** Example ***""")
                logger.info("""guid: %s""", example.guid)
                logger.info("""tokens: %s""", """ """.join([str(x) for x in tokens]))
                logger.info("""input_ids: %s""", """ """.join([str(x) for x in input_ids]))
                logger.info("""input_mask: %s""", """ """.join([str(x) for x in input_mask]))
                logger.info("""segment_ids: %s""", """ """.join([str(x) for x in segment_ids]))
                logger.info("""label_ids: %s""", """ """.join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class TokenClassificationDataset(Dataset):
    """PyTorch dataset that tokenizes and caches `InputFeatures` for a token-classification task.

    Class attributes were restored from colliding ``__UpperCamelCase`` placeholders to
    the names the methods already read (``self.features``, ``self.pad_token_label_id``).
    """

    features: List[InputFeatures]
    # Use cross entropy ignore_index as padding label id so that only
    # real label ids contribute to the loss later.
    pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

    def __init__(
        self,
        token_classification_task: TokenClassificationTask,
        data_dir: str,
        tokenizer: PreTrainedTokenizer,
        labels: List[str],
        model_type: str,
        max_seq_length: Optional[int] = None,
        overwrite_cache=False,
        mode: Split = Split.train,
    ):
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            data_dir,
            """cached_{}_{}_{}""".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + """.lock"""
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                logger.info(f"Loading features from cached file {cached_features_file}")
                self.features = torch.load(cached_features_file)
            else:
                logger.info(f"Creating features from dataset file at {data_dir}")
                examples = token_classification_task.read_examples_from_file(data_dir, mode)
                # TODO clean up all this to leverage built-in features of tokenizers
                self.features = token_classification_task.convert_examples_to_features(
                    examples,
                    labels,
                    max_seq_length,
                    tokenizer,
                    cls_token_at_end=bool(model_type in ["""xlnet"""]),
                    cls_token=tokenizer.cls_token,
                    cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0,
                    sep_token=tokenizer.sep_token,
                    sep_token_extra=False,  # NOTE(review): original value lost in rename; False matches upstream utils_ner — confirm
                    pad_on_left=bool(tokenizer.padding_side == """left"""),
                    pad_token=tokenizer.pad_token_id,
                    pad_token_segment_id=tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info(f"Saving features into cached file {cached_features_file}")
                torch.save(self.features, cached_features_file)

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]
if is_tf_available():
import tensorflow as tf
class A__ :
    """tf.data-backed dataset of token-classification InputFeatures.

    NOTE(review): names are mangled — both class-level annotations are
    literally ``__UpperCamelCase`` (the second clobbers the first), yet the
    methods read ``self.pad_token_label_id``, ``self.features`` and
    ``self.dataset``, none of which is ever assigned.  ``-100`` is Keras'
    conventional "ignore this position" label index.
    """

    # Presumably ``features: List[InputFeatures]`` in the original.
    __UpperCamelCase : List[InputFeatures]
    # Presumably ``pad_token_label_id: int = -100`` in the original.
    __UpperCamelCase : int = -100

    def __init__( self :str , SCREAMING_SNAKE_CASE :TokenClassificationTask , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :PreTrainedTokenizer , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[int] = None , SCREAMING_SNAKE_CASE :str=False , SCREAMING_SNAKE_CASE :Split = Split.train , ) -> Any:
        """Convert examples to features and wrap them in a ``tf.data.Dataset``.

        NOTE(review): all eight positional parameters are named
        ``SCREAMING_SNAKE_CASE`` (duplicate-argument SyntaxError) and results
        are bound to the throw-away local ``_a`` instead of ``self.features`` /
        ``self.dataset``; restore the original names before use.
        """
        _a : Tuple =token_classification_task.read_examples_from_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        # TODO clean up all this to leverage built-in features of tokenizers
        _a : List[Any] =token_classification_task.convert_examples_to_features(
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=SCREAMING_SNAKE_CASE , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )

        def gen():
            # Stream features as (inputs, labels) pairs for tf.data; emit
            # token_type_ids only when the feature actually carries them.
            for ex in self.features:
                if ex.token_type_ids is None:
                    yield (
                        {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                        ex.label_ids,
                    )
                else:
                    yield (
                        {
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label_ids,
                    )

        # Build the dataset with or without token_type_ids depending on the
        # tokenizer's model input names.  NOTE(review): ``tf.intaa`` looks like
        # a mangled ``tf.int32``/``tf.int64`` — confirm against upstream.
        if "token_type_ids" not in tokenizer.model_input_names:
            _a : Union[str, Any] =tf.data.Dataset.from_generator(
                SCREAMING_SNAKE_CASE , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa}, tf.intaa) , (
                    {"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )},
                    tf.TensorShape([None] ),
                ) , )
        else:
            _a : Union[str, Any] =tf.data.Dataset.from_generator(
                SCREAMING_SNAKE_CASE , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa}, tf.intaa) , (
                    {
                        """input_ids""": tf.TensorShape([None] ),
                        """attention_mask""": tf.TensorShape([None] ),
                        """token_type_ids""": tf.TensorShape([None] ),
                    },
                    tf.TensorShape([None] ),
                ) , )

    def __UpperCAmelCase ( self :Tuple ) -> Any:
        """Assert the dataset's cardinality and return the ``tf.data.Dataset``.

        NOTE(review): the asserted dataset is bound to the local ``_a``; the
        raw ``self.dataset`` is what is actually returned.
        """
        _a : List[Any] =self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
        return self.dataset

    def __len__( self :str ) -> Optional[int]:
        """Number of feature records."""
        return len(self.features )

    def __getitem__( self :int , SCREAMING_SNAKE_CASE :str ) -> InputFeatures:
        """Return the i-th record.  NOTE(review): body reads unbound ``i`` —
        the parameter was presumably named ``i``."""
        return self.features[i]
| 276 | 0 |
"""simple docstring"""
import numpy as np
def UpperCAmelCase__ (vector: np.ndarray, alpha: float) -> np.ndarray:
    """Exponential Linear Unit (ELU) activation, applied element-wise.

        elu(x) = x                      for x > 0
        elu(x) = alpha * (exp(x) - 1)   for x <= 0

    Fix: the mangled original named *both* parameters ``snake_case__`` (a
    duplicate-argument SyntaxError) while the body already read ``vector`` and
    ``alpha``; the parameter names are restored to match the body.

    :param vector: input array of any shape.
    :param alpha: slope/saturation coefficient for the negative branch.
    :return: array of the same shape with ELU applied element-wise.
    """
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 64 |
'''simple docstring'''
from __future__ import annotations
class A__ :
    """Boyer–Moore substring search using only the bad-character heuristic.

    Fixes to the mangled original: all three methods were named
    ``__UpperCAmelCase`` (so later defs shadowed earlier ones) and ``__init__``
    bound ``text``/``pattern`` and the lengths to throw-away locals ``_a``.
    The method names are restored from the call sites
    (``self.match_in_pattern`` / ``self.mismatch_in_text`` /
    ``bms.bad_character_heuristic``) and the attributes from the reads of
    ``self.text`` / ``self.pattern`` / ``self.textLen`` / ``self.patLen``.
    """

    def __init__(self, text: str, pattern: str) -> None:
        # Store the haystack/needle and cache their lengths.
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Rightmost index of ``char`` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Text index of the rightmost mismatch when the pattern is aligned at
        ``current_pos``; -1 means the pattern fully matches there."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list:
        """Return every start index at which the pattern occurs in the text."""
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


# Demo: the mangled script rebound the class name ``A__`` to these strings and
# then called undefined names (``BoyerMooreSearch``, ``text``, ``pattern``);
# restored to distinct variables that reference the class defined above.
text = """ABAABA"""
pattern = """AB"""
bms = A__(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print('''No match found''')
else:
    print('''Pattern found in following positions: ''')
    print(positions)
| 276 | 0 |
"""simple docstring"""
from __future__ import annotations
def __lowercase ( electron_conc , hole_conc , intrinsic_conc , ):
    """Given exactly two of the three carrier concentrations (pass the unknown
    one as 0), return ``("<name>", value)`` for the missing one using the
    mass-action law ``n * p = n_i**2``.

    Fix: the mangled original named all three parameters ``_a`` (a
    duplicate-argument SyntaxError); names are restored from the body's reads.

    :raises ValueError: if not exactly one argument is 0, or any is negative.
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
        raise ValueError('''You cannot supply more or less than 2 values''' )
    elif electron_conc < 0:
        raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
    elif hole_conc < 0:
        raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
    elif intrinsic_conc < 0:
        raise ValueError(
            '''Intrinsic concentration cannot be negative in a semiconductor''' )
    elif electron_conc == 0:
        # n = n_i^2 / p
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        # p = n_i^2 / n
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        # n_i = sqrt(n * p)
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        # Unreachable: exactly one argument is zero by the guard above.
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 264 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'''
)
# NOTE(review): the three module-level constants below were all name-mangled to
# ``A__`` — each assignment clobbers the previous one — yet the functions below
# read ``NUM_SHARDS``.  These were presumably ``<tokenizer fallback>``,
# ``INTERMEDIATE_SIZE_MAP`` and ``NUM_SHARDS``; restore the names before
# running the script.
A__: Dict = None

A__: Tuple = {
    '''7B''': 1_1008,
    '''13B''': 1_3824,
    '''30B''': 1_7920,
    '''65B''': 2_2016,
    '''70B''': 2_8672,
}

A__: Any = {
    '''7B''': 1,
    '''7Bf''': 1,
    '''13B''': 2,
    '''13Bf''': 2,
    '''30B''': 4,
    '''65B''': 8,
    '''70B''': 8,
    '''70Bf''': 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    """Feed-forward hidden size used by LLaMA: scale ``8n/3`` by
    ``ffn_dim_multiplier`` and round up to the next multiple of ``multiple_of``.

    Fix: the mangled original was named ``SCREAMING_SNAKE_CASE_`` with three
    parameters all called ``_UpperCAmelCase`` (a duplicate-argument
    SyntaxError); the name and parameters are restored from the body's reads
    and the ``compute_intermediate_size(...)`` call site in ``write_model``.
    """
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
def read_json(path):
    """Load and return the JSON document stored at ``path``.

    (Def name restored from the ``read_json(...)`` call site; the mangled
    original was named ``SCREAMING_SNAKE_CASE_``.)
    """
    with open(path, "r") as f:
        return json.load(f)
def write_json(text, path):
    """Serialise ``text`` as JSON to ``path``.

    (Def name restored from the ``write_json(...)`` call site; the mangled
    original named both parameters ``_UpperCAmelCase`` — a SyntaxError.)
    """
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    """Convert a Meta LLaMA checkpoint under ``input_base_path`` into a
    Transformers ``LlamaForCausalLM`` saved at ``model_path``.

    Fix: the def line is restored from the keyword call site in ``main``
    (``write_model(model_path=..., input_base_path=..., model_size=...,
    safe_serialization=...)``); the mangled original was named
    ``SCREAMING_SNAKE_CASE_`` with four parameters all called
    ``_UpperCAmelCase`` — a duplicate-argument SyntaxError.  The nested
    ``permute`` signature (same defect) is restored per the upstream
    conversion script.

    NOTE(review): the body below is still name-mangled — nearly every
    assignment targets the throw-away local ``_a`` while later lines read the
    intended names (``params``, ``n_layers``, ``num_shards``, ``loaded``,
    ``state_dict``, ``index_dict``, ``param_count``, ``filename``, …), and many
    call arguments were collapsed to ``_UpperCAmelCase``.  Those bindings must
    be restored from the upstream script before this function can run.
    """
    os.makedirs(model_path ,exist_ok=True )
    _a : Union[str, Any] =os.path.join(model_path ,"""tmp""" )
    os.makedirs(_a ,exist_ok=True )
    _a : int =read_json(os.path.join(input_base_path ,"""params.json""" ) )
    _a : int =NUM_SHARDS[model_size]
    _a : Dict =params["""n_layers"""]
    _a : Union[str, Any] =params["""n_heads"""]
    _a : List[str] =n_heads // num_shards
    _a : int =params["""dim"""]
    _a : Union[str, Any] =dim // n_heads
    _a : int =1_0_0_0_0.0
    _a : str =1.0 / (base ** (torch.arange(0 ,dims_per_head ,2 ).float() / dims_per_head))
    if "n_kv_heads" in params:
        _a : str =params["""n_kv_heads"""]  # for GQA / MQA
        _a : Optional[Any] =n_heads_per_shard // num_key_value_heads
        _a : Optional[int] =dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        _a : str =n_heads
        _a : Any =n_heads_per_shard
        _a : str =dim

    # permute for sliced rotary: reorder interleaved rotary weights to HF's
    # half-split layout.  (Signature/body restored from the upstream script;
    # the mangled version had four parameters all named ``_UpperCAmelCase``.)
    def permute(w, n_heads=n_heads, dima=dim, dimb=dim):
        return w.view(n_heads, dima // n_heads // 2, 2, dimb).transpose(1, 2).reshape(dima, dimb)

    print(F"Fetching all parameters from the checkpoint at {input_base_path}." )
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        _a : Any =torch.load(os.path.join(input_base_path ,"""consolidated.00.pth""" ) ,map_location="""cpu""" )
    else:
        # Sharded
        _a : List[Any] =[
            torch.load(os.path.join(input_base_path ,F"consolidated.{i:02d}.pth" ) ,map_location="""cpu""" )
            for i in range(num_shards )
        ]
    _a : Any =0
    _a : Optional[int] ={"""weight_map""": {}}
    for layer_i in range(n_layers ):
        _a : List[str] =F"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            _a : List[str] ={
                F"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[F"layers.{layer_i}.attention.wq.weight"] ),
                F"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[F"layers.{layer_i}.attention.wk.weight"] ),
                F"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[F"layers.{layer_i}.attention.wv.weight"],
                F"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[F"layers.{layer_i}.attention.wo.weight"],
                F"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[F"layers.{layer_i}.feed_forward.w1.weight"],
                F"model.layers.{layer_i}.mlp.down_proj.weight": loaded[F"layers.{layer_i}.feed_forward.w2.weight"],
                F"model.layers.{layer_i}.mlp.up_proj.weight": loaded[F"layers.{layer_i}.feed_forward.w3.weight"],
                F"model.layers.{layer_i}.input_layernorm.weight": loaded[F"layers.{layer_i}.attention_norm.weight"],
                F"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[F"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            _a : Tuple ={
                F"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    F"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                F"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    F"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            # NOTE(review): the view/reshape arguments below were mangled to
            # ``_UpperCAmelCase`` — the intended shard/head dimensions must be
            # restored from the upstream script.
            _a : str =permute(
                torch.cat(
                    [
                        loaded[i][F"layers.{layer_i}.attention.wq.weight"].view(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
                        for i in range(num_shards )
                    ] ,dim=0 ,).reshape(_UpperCAmelCase ,_UpperCAmelCase ) )
            _a : Tuple =permute(
                torch.cat(
                    [
                        loaded[i][F"layers.{layer_i}.attention.wk.weight"].view(
                            _UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
                        for i in range(num_shards )
                    ] ,dim=0 ,).reshape(_UpperCAmelCase ,_UpperCAmelCase ) ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,)
            _a : Any =torch.cat(
                [
                    loaded[i][F"layers.{layer_i}.attention.wv.weight"].view(
                        _UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
                    for i in range(num_shards )
                ] ,dim=0 ,).reshape(_UpperCAmelCase ,_UpperCAmelCase )
            _a : List[str] =torch.cat(
                [loaded[i][F"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards )] ,dim=1 )
            _a : Union[str, Any] =torch.cat(
                [loaded[i][F"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards )] ,dim=0 )
            _a : Tuple =torch.cat(
                [loaded[i][F"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards )] ,dim=1 )
            _a : Union[str, Any] =torch.cat(
                [loaded[i][F"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards )] ,dim=0 )
        _a : str =inv_freq
        for k, v in state_dict.items():
            _a : Any =filename
            param_count += v.numel()
        torch.save(state_dict ,os.path.join(tmp_model_path ,filename ) )
    _a : Union[str, Any] =F"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        _a : List[str] ={
            """model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
            """model.norm.weight""": loaded["""norm.weight"""],
            """lm_head.weight""": loaded["""output.weight"""],
        }
    else:
        _a : int ={
            """model.norm.weight""": loaded[0]["""norm.weight"""],
            """model.embed_tokens.weight""": torch.cat(
                [loaded[i]["""tok_embeddings.weight"""] for i in range(num_shards )] ,dim=1 ),
            """lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(num_shards )] ,dim=0 ),
        }
    for k, v in state_dict.items():
        _a : Dict =filename
        param_count += v.numel()
    torch.save(state_dict ,os.path.join(tmp_model_path ,filename ) )
    # Write configs
    _a : Tuple ={"""total_size""": param_count * 2}
    write_json(index_dict ,os.path.join(tmp_model_path ,"""pytorch_model.bin.index.json""" ) )
    _a : Optional[Any] =params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
    _a : int =params["""multiple_of"""] if """multiple_of""" in params else 256
    _a : List[Any] =LlamaConfig(
        hidden_size=dim ,intermediate_size=compute_intermediate_size(dim ,ffn_dim_multiplier ,multiple_of ) ,num_attention_heads=params["""n_heads"""] ,num_hidden_layers=params["""n_layers"""] ,rms_norm_eps=params["""norm_eps"""] ,num_key_value_heads=num_key_value_heads ,)
    config.save_pretrained(tmp_model_path )
    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()
    print("""Loading the checkpoint in a Llama model.""" )
    _a : Any =LlamaForCausalLM.from_pretrained(tmp_model_path ,torch_dtype=torch.floataa ,low_cpu_mem_usage=True )
    # Avoid saving this as part of the config.
    del model.config._name_or_path
    print("""Saving in the Transformers format.""" )
    model.save_pretrained(model_path ,safe_serialization=safe_serialization )
    shutil.rmtree(tmp_model_path )
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    """Load the sentencepiece model at ``input_tokenizer_path`` and save it in
    Transformers format under ``tokenizer_path``.

    Fix: the mangled original was named ``SCREAMING_SNAKE_CASE_`` with two
    parameters both called ``_UpperCAmelCase`` (a SyntaxError) and bound the
    tokenizer to the throw-away local ``_a``; names restored from the call
    site in ``main`` and the body's reads.
    """
    # Initialize the tokenizer based on the `spm` model; fall back to the slow
    # tokenizer when the fast one is unavailable (see the import guard above).
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(F"Saving a {tokenizer_class.__name__} to {tokenizer_path}." )
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    """CLI entry point: convert LLaMA weights and/or tokenizer to HF format.

    Fix: the mangled original was named ``SCREAMING_SNAKE_CASE_`` (while the
    ``__main__`` guard calls ``main()``) and bound the parser/args to the
    throw-away local ``_a``; names restored from the body's reads.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--input_dir""" ,help="""Location of LLaMA weights, which contains tokenizer.model and model folders""" ,)
    parser.add_argument(
        """--model_size""" ,choices=["""7B""", """7Bf""", """13B""", """13Bf""", """30B""", """65B""", """70B""", """70Bf""", """tokenizer_only"""] ,)
    parser.add_argument(
        """--output_dir""" ,help="""Location to write HF model and tokenizer""" ,)
    # NOTE(review): the mangled ``type=_UpperCAmelCase`` is restored to the
    # upstream ``type=bool`` — itself dubious for a CLI flag (any non-empty
    # string is truthy), but kept for behavioural parity with upstream.
    parser.add_argument("""--safe_serialization""" ,type=bool ,help="""Whether or not to save using `safetensors`.""" )
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir ,input_base_path=os.path.join(args.input_dir ,args.model_size ) ,model_size=args.model_size ,safe_serialization=args.safe_serialization ,)
    spm_path = os.path.join(args.input_dir ,"""tokenizer.model""" )
    write_tokenizer(args.output_dir ,spm_path )
if __name__ == "__main__":
main()
| 276 | 0 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase (UpperCAmelCase__ , unittest.TestCase ):
_lowercase = SpeechTaTokenizer
_lowercase = False
_lowercase = True
    def snake_case_ ( self: Union[str, Any] ):
        """Create a SpeechT5 tokenizer from the sentencepiece fixture, add the
        mask / CTC-blank specials, and save it to ``self.tmpdirname``.

        NOTE(review): mangled — ``SpeechTaTokenizer(A_)`` references an unbound
        ``A_`` (presumably the ``SAMPLE_VOCAB`` fixture path), the AddedToken
        flags are likewise ``A_``, and the results are assigned to the
        throw-away local ``__UpperCamelCase`` instead of ``tokenizer`` /
        ``mask_token`` (which later lines read).
        """
        super().setUp()
        # We have a SentencePiece fixture for testing
        __UpperCamelCase = SpeechTaTokenizer(A_ )
        __UpperCamelCase = AddedToken('<mask>',lstrip=A_,rstrip=A_ )
        __UpperCamelCase = mask_token
        tokenizer.add_special_tokens({'mask_token': mask_token} )
        tokenizer.add_tokens(['<ctc_blank>'] )
        tokenizer.save_pretrained(self.tmpdirname )
    def snake_case_ ( self: Optional[int],A_: Any ):
        """Return an ``(input_text, output_text)`` pair for round-trip tests.

        NOTE(review): mangled — both strings are bound to the same local
        ``__UpperCamelCase``, yet the return reads ``input_text`` /
        ``output_text``, which are never assigned.
        """
        __UpperCamelCase = """this is a test"""
        __UpperCamelCase = """this is a test"""
        return input_text, output_text
    def snake_case_ ( self: str,A_: Tuple,A_: Dict=False,A_: Tuple=20,A_: Any=5 ):
        """Encode/decode a sample text with the given tokenizer and return
        ``(text, ids)``.

        NOTE(review): mangled — four parameters share the name ``A_`` (a
        duplicate-argument SyntaxError), all locals are ``__UpperCamelCase``,
        and the return reads unbound ``text`` / ``ids``.
        """
        __UpperCamelCase = self.get_input_output_texts(A_ )
        __UpperCamelCase = tokenizer.encode(A_,add_special_tokens=A_ )
        __UpperCamelCase = tokenizer.decode(A_,clean_up_tokenization_spaces=A_ )
        return text, ids
    def snake_case_ ( self: List[Any] ):
        """``<pad>`` converts to id 1 and back.

        NOTE(review): mangled — the token string and id are bound to throw-away
        ``__UpperCamelCase`` locals while the asserts read the unbound ``A_``.
        """
        __UpperCamelCase = """<pad>"""
        __UpperCamelCase = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ),A_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ),A_ )
    def snake_case_ ( self: int ):
        """Spot-check ordering/content of the vocab keys and the total size (81).

        NOTE(review): mangled — the key list is bound to ``__UpperCamelCase``
        but read back as ``vocab_keys`` / ``A_``.
        """
        __UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0],'<s>' )
        self.assertEqual(vocab_keys[1],'<pad>' )
        self.assertEqual(vocab_keys[-4],'œ' )
        self.assertEqual(vocab_keys[-2],'<mask>' )
        self.assertEqual(vocab_keys[-1],'<ctc_blank>' )
        self.assertEqual(len(A_ ),81 )
    def snake_case_ ( self: List[Any] ):
        """Base vocab size (without added tokens) is 79."""
        self.assertEqual(self.get_tokenizer().vocab_size,79 )
    def snake_case_ ( self: List[str] ):
        """Adding regular and special tokens grows the vocab and the new ids
        are used when encoding.

        NOTE(review): mangled — locals are bound to ``__UpperCamelCase`` while
        later lines read ``tokenizers`` / ``tokens`` / ``all_size`` /
        ``all_size_a`` / ``A_``; the assertions reference unbound names as
        written.
        """
        __UpperCamelCase = self.get_tokenizers(do_lower_case=A_ )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                __UpperCamelCase = tokenizer.vocab_size
                __UpperCamelCase = len(A_ )
                self.assertNotEqual(A_,0 )
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                __UpperCamelCase = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
                __UpperCamelCase = tokenizer.add_tokens(A_ )
                __UpperCamelCase = tokenizer.vocab_size
                __UpperCamelCase = len(A_ )
                self.assertNotEqual(A_,0 )
                self.assertEqual(A_,A_ )
                self.assertEqual(A_,len(A_ ) )
                self.assertEqual(A_,all_size + len(A_ ) )
                __UpperCamelCase = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l',add_special_tokens=A_ )
                self.assertGreaterEqual(len(A_ ),4 )
                self.assertGreater(tokens[0],tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3],tokenizer.vocab_size - 1 )
                __UpperCamelCase = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
                __UpperCamelCase = tokenizer.add_special_tokens(A_ )
                __UpperCamelCase = tokenizer.vocab_size
                __UpperCamelCase = len(A_ )
                self.assertNotEqual(A_,0 )
                self.assertEqual(A_,A_ )
                self.assertEqual(A_,len(A_ ) )
                self.assertEqual(A_,all_size_a + len(A_ ) )
                __UpperCamelCase = tokenizer.encode(
                    '>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l',add_special_tokens=A_ )
                self.assertGreaterEqual(len(A_ ),6 )
                self.assertGreater(tokens[0],tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[0],tokens[1] )
                self.assertGreater(tokens[-3],tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3],tokens[-4] )
                self.assertEqual(tokens[0],tokenizer.eos_token_id )
                self.assertEqual(tokens[-3],tokenizer.pad_token_id )
    def snake_case_ ( self: int ):
        """Intentionally skipped in the original test-suite (no-op override)."""
        pass
    def snake_case_ ( self: Tuple ):
        """Intentionally skipped in the original test-suite (no-op override)."""
        pass
    def snake_case_ ( self: List[str] ):
        """``tokenize`` splits into SPIECE_UNDERLINE-separated characters, ids
        round-trip, and the out-of-vocab piece "92000" decodes to ``<unk>``.

        NOTE(review): mangled — tokenize/convert results are bound to
        ``__UpperCamelCase`` while the assertions read ``A_``.
        """
        __UpperCamelCase = self.get_tokenizer()
        __UpperCamelCase = tokenizer.tokenize('This is a test' )
        # fmt: off
        self.assertListEqual(A_,[SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'] )
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(A_ ),[4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],)
        __UpperCamelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            A_,[SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
        __UpperCamelCase = tokenizer.convert_tokens_to_ids(A_ )
        # fmt: off
        self.assertListEqual(A_,[4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
        # fmt: on
        __UpperCamelCase = tokenizer.convert_ids_to_tokens(A_ )
        self.assertListEqual(
            A_,[SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
@slow
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = [
"""Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """
"""general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """
"""Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """
"""models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""",
"""BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """
"""conditioning on both left and right context in all layers.""",
"""The quick brown fox jumps over the lazy dog.""",
]
# fmt: off
__UpperCamelCase = {
"""input_ids""": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A_,model_name='microsoft/speecht5_asr',revision='c5ef64c71905caeccde0e4462ef3f9077224c524',sequences=A_,)
| 310 |
'''simple docstring'''
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    """Assert that ``dataset`` is a :class:`Dataset` with the canonical 4x3
    test shape and that each column's dtype matches ``expected_features``.

    Fix: the def name is restored from the ``_check_sql_dataset(...)`` call
    sites below; the mangled original named both parameters ``_UpperCAmelCase``
    (a duplicate-argument SyntaxError) and asserted
    ``isinstance(_UpperCAmelCase, _UpperCAmelCase)``.
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""" ,[False, True] )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : str ) -> Optional[Any]:
    """Reading a SQLite table into a Dataset honours ``keep_in_memory``.

    NOTE(review): mangled — all four parameters are named ``_UpperCAmelCase``
    (a duplicate-argument SyntaxError); the body reads the pytest fixtures
    ``keep_in_memory``, ``sqlite_path`` and ``tmp_path``, and the locals are
    bound to the throw-away ``_a``.
    """
    _a : Any =tmp_path / """cache"""
    _a : int ={"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        _a : Tuple =SqlDatasetReader(
            """dataset""" ,"""sqlite:///""" + sqlite_path ,cache_dir=_UpperCAmelCase ,keep_in_memory=_UpperCAmelCase ).read()
    _check_sql_dataset(_UpperCAmelCase ,_UpperCAmelCase )
@require_sqlalchemy
@pytest.mark.parametrize(
    """features""" ,[
        None,
        {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
        {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
        {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
        {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
    ] ,)
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : Dict ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : int ) -> List[Any]:
    """Reading a SQLite table honours an explicit ``features`` schema.

    NOTE(review): mangled — four parameters all named ``_UpperCAmelCase``
    (a duplicate-argument SyntaxError); the body reads the fixtures
    ``features``, ``sqlite_path`` and ``tmp_path``.
    """
    _a : Union[str, Any] =tmp_path / """cache"""
    _a : str ={"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    _a : Optional[int] =features.copy() if features else default_expected_features
    _a : Union[str, Any] =(
        Features({feature: Value(_UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
    )
    _a : Optional[Any] =SqlDatasetReader("""dataset""" ,"""sqlite:///""" + sqlite_path ,features=_UpperCAmelCase ,cache_dir=_UpperCAmelCase ).read()
    _check_sql_dataset(_UpperCAmelCase ,_UpperCAmelCase )
def iter_sql_file(sqlite_path):
    """Yield every row of the ``dataset`` table in the SQLite database at
    ``sqlite_path``.

    Fixes: the def name is restored from the ``iter_sql_file(...)`` call sites
    below, and the mangled ``sqlitea`` module reference (the file-level
    ``import sqlitea`` is a mangled ``import sqlite3``) is replaced by a local
    import of the real ``sqlite3`` module so the helper is self-contained.
    """
    import sqlite3

    # ``closing`` ensures the connection is released when iteration finishes.
    with contextlib.closing(sqlite3.connect(sqlite_path ) ) as con:
        cur = con.cursor()
        cur.execute("""SELECT * FROM dataset""" )
        for row in cur:
            yield row
@require_sqlalchemy
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Dict ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : List[str] ) -> Union[str, Any]:
    """Round-trip: read a SQLite table, write it back with ``num_proc=1``, and
    compare the two tables row by row.

    NOTE(review): mangled — three parameters all named ``_UpperCAmelCase``
    (a duplicate-argument SyntaxError); the body reads the fixtures
    ``tmp_path`` and ``sqlite_path`` and passes mangled arguments through.
    """
    _a : Union[str, Any] =tmp_path / """cache"""
    _a : Union[str, Any] =os.path.join(_UpperCAmelCase ,"""tmp.sql""" )
    _a : Tuple =SqlDatasetReader("""dataset""" ,"""sqlite:///""" + sqlite_path ,cache_dir=_UpperCAmelCase ).read()
    SqlDatasetWriter(_UpperCAmelCase ,"""dataset""" ,"""sqlite:///""" + output_sqlite_path ,num_proc=1 ).write()
    _a : Tuple =iter_sql_file(_UpperCAmelCase )
    _a : List[Any] =iter_sql_file(_UpperCAmelCase )
    for rowa, rowa in zip(_UpperCAmelCase ,_UpperCAmelCase ):
        assert rowa == rowa
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Round-trip with num_proc=2: multiprocessed writes must preserve all rows in order."""
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """num_proc=0 is invalid and must raise ValueError before any write happens."""
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
| 276 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : int = logging.get_logger(__name__)
UpperCamelCase : List[str] = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __lowerCAmelCase(PretrainedConfig):
    """
    Configuration for the TrOCR text decoder (the decoder half of the TrOCR
    encoder-decoder OCR model). Mirrors `transformers.TrOCRConfig`.
    """

    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map common config attribute names onto the decoder-specific ones.
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        # Special-token ids are handled by the PretrainedConfig base class.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 316 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__: List[str] = logging.get_logger(__name__)
A__: Union[str, Any] = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class A__(PretrainedConfig):
    """Configuration for a data2vec-text model (the architecture mirrors RoBERTa)."""

    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    """ONNX export configuration for data2vec-text models."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The dynamic axes depend on the task: multiple-choice adds a "choice" dimension.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 276 | 0 |
# Lazy-import scaffold for the MBart model family: submodules are only imported
# on first attribute access (or eagerly under static type checking).
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 340 |
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
A__: Union[str, Any] = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale an absolute ``(x0, y0, x1, y1)`` pixel box to the 0-1000 range LayoutLM expects."""
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Run Tesseract OCR on *image* and return ``(words, normalized_boxes)``.

    Boxes are normalized to the 0-1000 range via :func:`normalize_box`.
    """
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates (set gives O(1) membership tests)
    irrelevant_indices = {idx for idx, word in enumerate(words) if not word.strip()}
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = [[x, y, x + w, y + h] for x, y, w, h in zip(left, top, width, height)]

    # finally, normalize the bounding boxes
    normalized_boxes = [normalize_box(box, image_width, image_height) for box in actual_boxes]

    # explicit check instead of `assert`, which is stripped under `python -O`
    if len(words) != len(normalized_boxes):
        raise ValueError("Not as many words as there are bounding boxes")

    return words, normalized_boxes
class A__(BaseImageProcessor):
    r"""
    LayoutLMv2-style image processor: optionally resizes document images and runs the
    Tesseract OCR engine to extract words plus normalized bounding boxes.

    Args:
        do_resize: Whether to resize images to `size`.
        size: Target size after resizing; defaults to ``{"height": 224, "width": 224}``.
        resample: PIL resampling filter used when resizing.
        apply_ocr: Whether to run Tesseract to get words and normalized boxes.
        ocr_lang: Tesseract language code; `None` means English.
        tesseract_config: Extra flags forwarded to Tesseract's ``config`` parameter.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        # `resize` below resolves to the module-level transform, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess a batch of images (optional OCR + resize + channel flip) into a `BatchFeature`."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 276 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ : Tuple = logging.get_logger(__name__)
lowercase__ : int = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Configuration for a RoBERTa model (mirrors `transformers.RobertaConfig`)."""

    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig(OnnxConfig):
    """ONNX export configuration for RoBERTa models."""

    @property
    def inputs(self):
        # Multiple-choice inputs carry an extra "choice" dimension.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    """Fetch a single Hacker News item as a dict via the public Firebase API."""
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()
def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Return the top *max_stories* posts currently on the Hacker News front page."""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]
def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    """Render the top Hacker News stories as a markdown bullet list of links."""
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
| 276 | 0 |
# Backward-compatibility shim: the ControlNet pipelines moved to
# ``diffusers.pipelines.controlnet``. Importing from this module still works
# but emits a deprecation warning pointing at the new location.
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401


deprecate(
    'stable diffusion controlnet',
    '0.22.0',
    'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
    standard_warn=False,
    stacklevel=3,
)
| 124 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """
    Output of the VQModel encoding method.

    Args:
        latents: the encoded latent sample from the last layer of the encoder.
    """

    latents: torch.FloatTensor
class A__(ModelMixin, ConfigMixin):
    r"""
    VQ-VAE model for decoding latent representations: an Encoder, a vector-quantization
    bottleneck (`VectorQuantizer`) and a Decoder.
    """

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        """Encode *x* into (pre-quantization) latents."""
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        """Quantize (unless *force_not_quantize*) and decode latents back to an image."""
        # also go through quantization layer
        if not force_not_quantize:
            quant, commit_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        # spatial norm decoders additionally condition on the quantized latents
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        """Full autoencoding pass: encode, quantize, decode."""
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
| 276 | 0 |
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the two sorted runs ``input_list[low:mid]`` and ``input_list[mid:high+1]`` in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        # pop the smaller head; `<=` keeps the sort stable
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list
def iter_merge_sort(input_list: list) -> list:
    """Return a sorted copy of *input_list* using iterative (bottom-up) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging: double the run length `p` each pass
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single run
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list
if __name__ == "__main__":
    # Interactive driver: read a comma-separated list of ints and print it sorted.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
| 338 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    """Builds a tiny ESM config plus random inputs and runs shape checks for the unit tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random input ids / masks / labels together with a small config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(UpperCAmelCase__, unittest.TestCase):
    """Common model tests for ESM.

    NOTE(review): the original class listed two distinct mixin bases; both were
    collapsed to ``UpperCAmelCase__`` during obfuscation, and duplicate bases make
    the class statement raise ``TypeError``, so a single copy is kept here.
    Confirm the second mixin (likely a pipeline-test mixin) against upstream.
    """

    # ESM checkpoints use a non-standard layout, so shape-mismatch checks are off.
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        """Create the shared model tester and config tester."""
        self.model_tester = EsmModelTester(self)
        # NOTE(review): the config_class argument was destroyed by obfuscation;
        # EsmConfig is the obvious candidate — confirm against upstream.
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        # Run the model check once per supported position-embedding flavor.
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        """Padding tokens must keep ``padding_idx`` as their position id."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        """Position ids derived from embeddings start right after ``padding_idx``."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        # NOTE(review): the original method name was lost; this name matches the
        # skip message in upstream common tests — confirm.
        pass
@require_torch
class EsmModelIntegrationTest(UpperCAmelCase__):
    """Slow integration checks against the public facebook/esm2_t6_8M_UR50D weights."""

    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            # Reference logits recorded from the released checkpoint.
            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 276 | 0 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
UpperCamelCase__ = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class a__(Pipeline):
    """Zero-shot audio classification pipeline.

    Scores a single-channel audio clip against free-text candidate labels using an
    audio/text matching model (e.g. CLAP). Labels are turned into hypotheses via
    ``hypothesis_template`` and ranked by the model's audio-text similarity.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework != "pt":
            raise ValueError(f"""The {self.__class__} is only available in PyTorch.""")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        """Classify the audio(s) passed as input against ``candidate_labels``."""
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        """Split user kwargs into (preprocess, forward, postprocess) parameter dicts."""
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        """Load/decode the audio and tokenize one hypothesis per candidate label."""
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        # Fix: the template must be formatted with each label, not the audio input.
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        """Run the joint audio/text model; keep per-audio logits for postprocess."""
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        """Softmax the logits and return labels sorted by descending score."""
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            # Fix: original lambda referenced an undefined name (`lambda _A: -x[0]`).
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
| 92 |
'''simple docstring'''
from math import isqrt
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime, via trial division up to its square root.

    Fix: numbers below 2 (0 and 1) are not prime; the original returned True for
    them because the divisor range was empty.
    """
    if number < 2:
        return False
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below ``max_prime`` expressible as a difference of two
    consecutive cubes: (k+1)**3 - k**3 = 3*k**2 + 3*k + 1.

    Successive candidates differ by 6*k, so they are generated incrementally
    instead of recomputing the cubes.

    Note: both functions were originally named ``SCREAMING_SNAKE_CASE_`` (the
    second shadowed the first) while the bodies/callers referenced ``is_prime``
    and ``solution``; the names are restored to match the call sites.
    """
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # (1+1)**3 - 1**3, the first candidate
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
    # Print the count of qualifying primes below the default limit (10**6).
    print(F"{solution() = }")
| 276 | 0 |
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the maximum subarray sum of ``arr`` using Kadane's algorithm.

    Args:
        arr: sequence of numbers (may be empty, in which case 0 is returned).
        allow_empty_subarrays: when True, the empty subarray (sum 0) is a valid
            answer, so an all-negative input yields 0 instead of its largest element.

    Fix: the original ``def`` repeated one obfuscated parameter name twice (a
    SyntaxError) and its body referenced names that were never bound; the
    ``__main__`` block below calls ``max_subarray_sum``, so that name is restored.
    """
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Either extend the running subarray, or restart at `num`
        # (or at the empty subarray when allowed).
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Fix: the list was bound to an obfuscated name while the print below
    # referenced `nums`, causing a NameError at runtime.
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f'''{max_subarray_sum(nums) = }''')
| 192 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
# Emit a deprecation warning at import time; the symbols themselves are
# re-exported above from their new locations, so old imports keep working.
deprecate(
    '''stable diffusion controlnet''',
    '''0.22.0''',
    '''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
    standard_warn=False,
    stacklevel=3,
)
| 276 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    """Builds a tiny ConvBERT config plus random inputs for the TF model tests.

    Renamed from the obfuscated ``A__`` because the test class below
    instantiates ``TFConvBertModelTester(self)``. Constructor arguments are
    accepted for API compatibility, but (as in the upstream tester) the values
    are hard-coded in the body.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # Fix: the obfuscated body dropped every `self.` prefix, so the values
        # were bound to throwaway locals and the tester had no attributes.
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, input_mask, *labels) tensors."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        # Exercise both dict- and list-style inputs accepted by TF models.
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        # Replicate each input once per choice along a new axis.
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline tests for the TF ConvBERT models.

    The original obfuscation collapsed both mixin bases to one duplicated name
    (a TypeError); the two mixins imported at the top of the file are restored.
    """

    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        """Save each model as a TF SavedModel and check reloaded outputs' shapes."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )
                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                # ConvBERT splits heads between self-attention and convolution,
                # hence num_attention_heads / 2 in the attention shape.
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        """Attentions must be emitted with the expected count/shape in every mode."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    """Slow integration check against the public YituTech/conv-bert-base weights."""

    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        # Reference hidden states recorded from the released checkpoint.
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 52 |
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    """kwargs, expected""" ,[
        ({"""num_shards""": 0, """max_num_jobs""": 1}, []),
        ({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
        ({"""num_shards""": 10, """max_num_jobs""": 10}, [range(i ,i + 1 ) for i in range(10 )]),
        ({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
        ({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 ,4 ), range(4 ,7 ), range(7 ,10 )]),
        ({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 ,1 ), range(1 ,2 ), range(2 ,3 )]),
    ] ,)
def test_distribute_shards(kwargs, expected):
    """Shards must be split into at most max_num_jobs contiguous ranges."""
    # Fix: the original def repeated one obfuscated parameter name (SyntaxError)
    # and the third parametrize case referenced an undefined name instead of `i`.
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
    """gen_kwargs, max_num_jobs, expected""" ,[
        ({"""foo""": 0}, 10, [{"""foo""": 0}]),
        ({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
        ({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
        ({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
        ({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
    ] ,)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    """gen_kwargs must be split into per-job kwargs, sharding only list values."""
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
    """gen_kwargs, expected""" ,[
        ({"""foo""": 0}, 1),
        ({"""shards""": [0]}, 1),
        ({"""shards""": [0, 1, 2, 3]}, 4),
        ({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
        ({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
        ({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
    ] ,)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    """Shard count is the common list length; mismatched lists must raise."""
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 276 | 0 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Fix: both constants were bound to the same obfuscated name `A_`, so the
# second assignment silently clobbered the first. Distinct names restored
# (per-device training batch size vs. fixed evaluation batch size).
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the GLUE MRPC dataset, using
    "bert-base-cased" as the tokenizer.

    Args:
        accelerator (`Accelerator`): accelerator whose process ordering and
            distributed state are honored while building the loaders.
        batch_size (`int`, *optional*): batch size of the train dataloader.

    Fix: the original def repeated the same obfuscated parameter name
    (SyntaxError) and bound intermediate results to throwaway names while
    reading them under the originals; the name `get_dataloaders` is required
    by the caller in `training_function`.
    """
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""")
    datasets = load_dataset("""glue""", """mrpc""")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["""idx""", """sentence1""", """sentence2"""],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""", """labels""")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="""longest""", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="""pt""",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    # 32 is the fixed evaluation batch size (EVAL_BATCH_SIZE in the upstream example).
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""], shuffle=False, collate_fn=collate_fn, batch_size=32
    )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    # Fix: the mock was assigned to the obfuscated constant name `A_`, so the
    # real dataloader factory was never replaced during tests.
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """Train and evaluate BERT on GLUE MRPC using `Accelerator.accumulate`.

    Args:
        config (`dict`): hyper-parameters (`lr`, `num_epochs`, `seed`, `batch_size`).
        args: parsed CLI namespace (`cpu`, `mixed_precision`, `gradient_accumulation_steps`).

    Fix: the original def repeated one obfuscated parameter name (SyntaxError)
    and bound every intermediate value to `_snake_case`, so the names read
    below (model, optimizer, dataloaders, loss, ...) were never defined. The
    name `training_function` is required by the caller in `main`.
    """
    # For testing only: shorten the run when the mocked dataloaders are active.
    if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
        config["""num_epochs"""] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            """Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"""
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""])
    seed = int(config["""seed"""])
    batch_size = int(config["""batch_size"""])

    metric = evaluate.load("""glue""", """mrpc""")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=1_00, num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["""labels"""]))
            metric.add_batch(
                predictions=predictions, references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F"epoch {epoch}:", eval_metric)
def main():
    """Parse CLI arguments and launch training.

    Fix: the function (and its two siblings above) were all named
    `UpperCAmelCase__`, so the `main()` call in the guard below hit the wrong
    definition; `type=`/`default=` for argparse were also bound to an
    undefined name and are restored to `str`/`int`/`None`.
    """
    parser = argparse.ArgumentParser(description="""Simple example of training script.""")
    parser.add_argument(
        """--mixed_precision""", type=str, default=None, choices=["""no""", """fp16""", """bf16""", """fp8"""], help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""", )
    # New Code #
    parser.add_argument(
        """--gradient_accumulation_steps""", type=int, default=1, help="""The number of minibatches to be ran before gradients are accumulated.""", )
    parser.add_argument("""--cpu""", action="""store_true""", help="""If passed, will train on the CPU.""")
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 64 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger and the pretrained-config archive map for RoCBert.
# NOTE(review): both bindings use the same obfuscated name ``A__``, so the
# second assignment clobbers the logger — upstream these are two distinct
# names (``logger`` and ``ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP``); verify
# before relying on either binding.
A__: Dict = logging.get_logger(__name__)
A__: Tuple = {
    '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class A__ ( UpperCAmelCase__ ):
    """Configuration class for the RoCBert model (model type ``roc_bert``).

    Stores the standard BERT hyper-parameters plus RoCBert's extra
    pronunciation/shape embedding options. Defaults match
    ``weiweishi/roc-bert-base-zh``.
    """

    # fix: was an obfuscated ``__UpperCamelCase`` class attribute; the
    # PretrainedConfig machinery looks this value up as ``model_type``.
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        """Build the config; see class docstring for the defaults' provenance.

        All arguments keep the positions and default values of the original
        signature, so positional callers are unaffected.
        """
        # fix: the obfuscated signature repeated one placeholder name for every
        # parameter (a duplicate-argument SyntaxError) and the body wrote each
        # value to a throwaway ``_a`` local instead of ``self`` — restore the
        # real parameter names and instance attributes (original order kept).
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 276 | 0 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class _UpperCAmelCase :
    # NOTE(review): obfuscated model-tester harness (presumably upstream's
    # ``BeitModelTester`` — verify). Every constructor parameter shares the
    # name ``lowercase_`` (a duplicate-argument SyntaxError) and every method
    # shares the name ``_snake_case`` (each definition clobbers the previous
    # one), so this class cannot run as written. The ``snake_case_``
    # assignments below look like corrupted ``self.<attr>`` writes; restore
    # distinct upstream identifiers before use.
    def __init__( self : Optional[int] , lowercase_ : Tuple , lowercase_ : List[Any]=100 , lowercase_ : str=13 , lowercase_ : List[str]=30 , lowercase_ : str=2 , lowercase_ : int=3 , lowercase_ : Tuple=True , lowercase_ : List[str]=True , lowercase_ : Union[str, Any]=32 , lowercase_ : Dict=4 , lowercase_ : Optional[Any]=4 , lowercase_ : str=37 , lowercase_ : Tuple="gelu" , lowercase_ : List[Any]=0.1 , lowercase_ : Optional[int]=0.1 , lowercase_ : List[Any]=10 , lowercase_ : Any=0.02 , lowercase_ : Any=3 , lowercase_ : Dict=None , lowercase_ : List[Any]=[0, 1, 2, 3] , ):
        # NOTE(review): as written these are dead local rebindings of one name;
        # no instance state is actually stored.
        snake_case_ : Tuple = parent
        snake_case_ : Dict = 100
        snake_case_ : List[Any] = batch_size
        snake_case_ : Dict = image_size
        snake_case_ : Union[str, Any] = patch_size
        snake_case_ : Any = num_channels
        snake_case_ : Optional[int] = is_training
        snake_case_ : Optional[int] = use_labels
        snake_case_ : List[str] = hidden_size
        snake_case_ : Dict = num_hidden_layers
        snake_case_ : Any = num_attention_heads
        snake_case_ : int = intermediate_size
        snake_case_ : Dict = hidden_act
        snake_case_ : str = hidden_dropout_prob
        snake_case_ : int = attention_probs_dropout_prob
        snake_case_ : Optional[Any] = type_sequence_label_size
        snake_case_ : Optional[int] = initializer_range
        snake_case_ : Optional[int] = scope
        snake_case_ : Tuple = out_indices
        snake_case_ : Tuple = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        snake_case_ : Optional[int] = (image_size // patch_size) ** 2
        snake_case_ : Optional[Any] = num_patches + 1

    # prepare_config_and_inputs (presumed upstream name)
    def _snake_case ( self : Any ):
        snake_case_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        snake_case_ : Dict = None
        snake_case_ : int = None
        if self.use_labels:
            snake_case_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            snake_case_ : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        snake_case_ : Union[str, Any] = self.get_config()
        return config, pixel_values, labels, pixel_labels

    # get_config (presumed upstream name)
    def _snake_case ( self : Optional[int] ):
        return BeitConfig(
            vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , out_indices=self.out_indices , )

    # create_and_check_model (presumed upstream name)
    def _snake_case ( self : int , lowercase_ : int , lowercase_ : Any , lowercase_ : int , lowercase_ : Dict ):
        snake_case_ : List[Any] = BeitModel(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        snake_case_ : Optional[Any] = model(lowercase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    # create_and_check_for_masked_lm (presumed upstream name)
    def _snake_case ( self : Any , lowercase_ : Any , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Any ):
        snake_case_ : Optional[Any] = BeitForMaskedImageModeling(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        snake_case_ : List[Any] = model(lowercase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )

    # create_and_check_for_image_classification (presumed upstream name)
    def _snake_case ( self : List[str] , lowercase_ : Optional[int] , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Tuple ):
        snake_case_ : str = self.type_sequence_label_size
        snake_case_ : Union[str, Any] = BeitForImageClassification(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        snake_case_ : Union[str, Any] = model(lowercase_ , labels=lowercase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        snake_case_ : Optional[Any] = 1
        snake_case_ : Optional[int] = BeitForImageClassification(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        snake_case_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        snake_case_ : Tuple = model(lowercase_ , labels=lowercase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    # create_and_check_for_semantic_segmentation (presumed upstream name)
    def _snake_case ( self : Optional[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : Union[str, Any] ):
        snake_case_ : Any = self.num_labels
        snake_case_ : List[Any] = BeitForSemanticSegmentation(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        snake_case_ : List[Any] = model(lowercase_ )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
        snake_case_ : Dict = model(lowercase_ , labels=lowercase_ )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )

    # prepare_config_and_inputs_for_common (presumed upstream name)
    def _snake_case ( self : Tuple ):
        snake_case_ : int = self.prepare_config_and_inputs()
        snake_case_ : List[str] = config_and_inputs
        snake_case_ : Dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase):
    # NOTE(review): obfuscated BEiT model test suite (presumably upstream's
    # ``BeitModelTest`` — verify). All test methods share the name
    # ``_snake_case`` and clobber one another in the class namespace, the
    # five ``_lowerCAmelCase`` class attributes likewise overwrite each other
    # (upstream they are distinct: all_model_classes, pipeline mapping, and
    # three booleans), and several bodies reference names (``model``, ``x``,
    # ``arg_names``, ``loss``, ``BeitModelTester``) that the obfuscation
    # renamed away. Restore upstream identifiers before running.
    _lowerCAmelCase : Tuple = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    _lowerCAmelCase : List[str] = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    _lowerCAmelCase : List[Any] = False
    _lowerCAmelCase : Optional[int] = False
    _lowerCAmelCase : List[Any] = False

    # setUp (presumed upstream name)
    def _snake_case ( self : List[Any] ):
        snake_case_ : Dict = BeitModelTester(self )
        snake_case_ : List[str] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )

    # test_config (presumed upstream name)
    def _snake_case ( self : str ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''BEiT does not use inputs_embeds''' )
    def _snake_case ( self : Any ):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def _snake_case ( self : Union[str, Any] ):
        pass

    # test_model_common_attributes (presumed upstream name)
    def _snake_case ( self : Any ):
        snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case_ : Union[str, Any] = model_class(lowercase_ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            snake_case_ : List[Any] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) )

    # test_forward_signature (presumed upstream name)
    def _snake_case ( self : str ):
        snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case_ : Optional[Any] = model_class(lowercase_ )
            snake_case_ : int = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case_ : Dict = [*signature.parameters.keys()]
            snake_case_ : str = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , lowercase_ )

    # test_model (presumed upstream name)
    def _snake_case ( self : Dict ):
        snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase_ )

    # test_for_masked_lm (presumed upstream name)
    def _snake_case ( self : List[Any] ):
        snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*lowercase_ )

    # test_for_image_classification (presumed upstream name)
    def _snake_case ( self : Dict ):
        snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowercase_ )

    # test_for_semantic_segmentation (presumed upstream name)
    def _snake_case ( self : Union[str, Any] ):
        snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*lowercase_ )

    # test_training (presumed upstream name)
    def _snake_case ( self : str ):
        if not self.model_tester.is_training:
            return
        snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case_ : Optional[Any] = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(lowercase_ ), BeitForMaskedImageModeling]:
                continue
            snake_case_ : Tuple = model_class(lowercase_ )
            model.to(lowercase_ )
            model.train()
            snake_case_ : Optional[int] = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
            snake_case_ : List[Any] = model(**lowercase_ ).loss
            loss.backward()

    # test_training_gradient_checkpointing (presumed upstream name)
    def _snake_case ( self : int ):
        snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        snake_case_ : List[str] = False
        snake_case_ : int = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(lowercase_ ), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            snake_case_ : Any = model_class(lowercase_ )
            model.gradient_checkpointing_enable()
            model.to(lowercase_ )
            model.train()
            snake_case_ : Tuple = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
            snake_case_ : Dict = model(**lowercase_ ).loss
            loss.backward()

    # test_initialization (presumed upstream name)
    def _snake_case ( self : List[str] ):
        snake_case_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case_ : Union[str, Any] = _config_zero_init(lowercase_ )
        for model_class in self.all_model_classes:
            snake_case_ : Dict = model_class(config=lowercase_ )
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )

    @slow
    def _snake_case ( self : str ):
        # test_model_from_pretrained (presumed upstream name)
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case_ : Union[str, Any] = BeitModel.from_pretrained(lowercase_ )
            self.assertIsNotNone(lowercase_ )
def __lowercase ( ):
    """Load the COCO cats fixture image used by the BEiT integration tests."""
    # fix: the opened image was bound to an obfuscated throwaway name while the
    # undefined name ``image`` was returned (NameError); bind and return it.
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase):
    # NOTE(review): obfuscated BEiT integration tests (presumably upstream's
    # ``BeitModelIntegrationTest`` — verify). All test methods share the name
    # ``_snake_case`` and clobber one another, and locals like ``outputs``/
    # ``logits``/``bool_masked_pos``/``ds``/``segmentation`` are referenced
    # after being assigned to the collapsed name ``snake_case_`` — restore
    # upstream identifiers before running. All tests are @slow (download
    # pretrained checkpoints).
    @cached_property
    def _snake_case ( self : Dict ):
        # default_image_processor (presumed upstream name)
        return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None

    @slow
    def _snake_case ( self : Union[str, Any] ):
        # masked-image-modeling head: checks logits shape and a 3x3 slice.
        snake_case_ : Union[str, Any] = BeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ).to(lowercase_ )
        snake_case_ : Tuple = self.default_image_processor
        snake_case_ : Union[str, Any] = prepare_img()
        snake_case_ : Tuple = image_processor(images=lowercase_ , return_tensors='''pt''' ).pixel_values.to(lowercase_ )
        # prepare bool_masked_pos
        snake_case_ : int = torch.ones((1, 196) , dtype=torch.bool ).to(lowercase_ )
        # forward pass
        with torch.no_grad():
            snake_case_ : Any = model(pixel_values=lowercase_ , bool_masked_pos=lowercase_ )
        snake_case_ : int = outputs.logits
        # verify the logits
        snake_case_ : Any = torch.Size((1, 196, 8192) )
        self.assertEqual(logits.shape , lowercase_ )
        snake_case_ : Optional[int] = torch.tensor(
            [[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]] ).to(lowercase_ )
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , lowercase_ , atol=1E-2 ) )

    @slow
    def _snake_case ( self : int ):
        # ImageNet-1k classification head: shape, 3-logit slice, argmax class.
        snake_case_ : List[str] = BeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ).to(lowercase_ )
        snake_case_ : Dict = self.default_image_processor
        snake_case_ : Tuple = prepare_img()
        snake_case_ : str = image_processor(images=lowercase_ , return_tensors='''pt''' ).to(lowercase_ )
        # forward pass
        with torch.no_grad():
            snake_case_ : Union[str, Any] = model(**lowercase_ )
        snake_case_ : int = outputs.logits
        # verify the logits
        snake_case_ : Optional[Any] = torch.Size((1, 1000) )
        self.assertEqual(logits.shape , lowercase_ )
        snake_case_ : Optional[int] = torch.tensor([-1.23_85, -1.09_87, -1.01_08] ).to(lowercase_ )
        self.assertTrue(torch.allclose(logits[0, :3] , lowercase_ , atol=1E-4 ) )
        snake_case_ : List[Any] = 281
        self.assertEqual(logits.argmax(-1 ).item() , lowercase_ )

    @slow
    def _snake_case ( self : Any ):
        # ImageNet-22k classification head (large checkpoint).
        snake_case_ : int = BeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ).to(
            lowercase_ )
        snake_case_ : List[Any] = self.default_image_processor
        snake_case_ : Optional[Any] = prepare_img()
        snake_case_ : Optional[Any] = image_processor(images=lowercase_ , return_tensors='''pt''' ).to(lowercase_ )
        # forward pass
        with torch.no_grad():
            snake_case_ : Any = model(**lowercase_ )
        snake_case_ : Union[str, Any] = outputs.logits
        # verify the logits
        snake_case_ : Tuple = torch.Size((1, 21841) )
        self.assertEqual(logits.shape , lowercase_ )
        snake_case_ : List[str] = torch.tensor([1.68_81, -0.27_87, 0.59_01] ).to(lowercase_ )
        self.assertTrue(torch.allclose(logits[0, :3] , lowercase_ , atol=1E-4 ) )
        snake_case_ : List[str] = 2396
        self.assertEqual(logits.argmax(-1 ).item() , lowercase_ )

    @slow
    def _snake_case ( self : int ):
        # ADE20k semantic segmentation: logits slice depends on the installed
        # Pillow version (resampling changed in Pillow 9).
        snake_case_ : Union[str, Any] = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
        snake_case_ : List[Any] = model.to(lowercase_ )
        snake_case_ : Tuple = BeitImageProcessor(do_resize=lowercase_ , size=640 , do_center_crop=lowercase_ )
        snake_case_ : Tuple = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
        snake_case_ : Tuple = Image.open(ds[0]['''file'''] )
        snake_case_ : List[str] = image_processor(images=lowercase_ , return_tensors='''pt''' ).to(lowercase_ )
        # forward pass
        with torch.no_grad():
            snake_case_ : Any = model(**lowercase_ )
        snake_case_ : Tuple = outputs.logits
        # verify the logits
        snake_case_ : List[str] = torch.Size((1, 150, 160, 160) )
        self.assertEqual(logits.shape , lowercase_ )
        snake_case_ : Optional[Any] = version.parse(PIL.__version__ ) < version.parse('''9.0.0''' )
        if is_pillow_less_than_a:
            snake_case_ : Dict = torch.tensor(
                [
                    [[-4.92_25, -2.39_54, -3.05_22], [-2.88_22, -1.00_46, -1.75_61], [-2.95_49, -1.32_28, -2.13_47]],
                    [[-5.81_68, -3.41_29, -4.07_78], [-3.86_51, -2.22_14, -3.02_77], [-3.83_56, -2.46_43, -3.35_35]],
                    [[-0.00_78, 3.99_52, 4.07_54], [2.98_56, 4.69_44, 5.00_35], [3.24_13, 4.78_13, 4.99_69]],
                ] , device=lowercase_ , )
        else:
            snake_case_ : List[str] = torch.tensor(
                [
                    [[-4.89_60, -2.36_88, -3.03_55], [-2.84_78, -0.98_36, -1.74_18], [-2.94_49, -1.33_32, -2.14_56]],
                    [[-5.80_81, -3.41_24, -4.10_06], [-3.85_61, -2.20_81, -3.03_23], [-3.83_65, -2.46_01, -3.36_69]],
                    [[-0.03_09, 3.98_68, 4.05_40], [2.96_40, 4.68_77, 4.99_76], [3.20_81, 4.76_90, 4.99_42]],
                ] , device=lowercase_ , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowercase_ , atol=1E-4 ) )

    @slow
    def _snake_case ( self : int ):
        # post_process_semantic_segmentation: with and without target_sizes.
        snake_case_ : Optional[int] = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
        snake_case_ : List[str] = model.to(lowercase_ )
        snake_case_ : List[str] = BeitImageProcessor(do_resize=lowercase_ , size=640 , do_center_crop=lowercase_ )
        snake_case_ : List[str] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
        snake_case_ : Tuple = Image.open(ds[0]['''file'''] )
        snake_case_ : Tuple = image_processor(images=lowercase_ , return_tensors='''pt''' ).to(lowercase_ )
        # forward pass
        with torch.no_grad():
            snake_case_ : Dict = model(**lowercase_ )
        snake_case_ : Dict = outputs.logits.detach().cpu()
        snake_case_ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=lowercase_ , target_sizes=[(500, 300)] )
        snake_case_ : List[str] = torch.Size((500, 300) )
        self.assertEqual(segmentation[0].shape , lowercase_ )
        snake_case_ : List[str] = image_processor.post_process_semantic_segmentation(outputs=lowercase_ )
        snake_case_ : Any = torch.Size((160, 160) )
        self.assertEqual(segmentation[0].shape , lowercase_ )
| 264 |
'''simple docstring'''
class A__ :
    # NOTE(review): obfuscated undirected weighted graph (presumably the
    # ``Graph`` class of TheAlgorithms' Borůvka MST — verify). Many
    # ``self.adjacency[...]``/counter writes were replaced with dead ``_a``
    # locals (see __init__, add_vertex, add_edge, distinct_weight), so the
    # adjacency map is never actually populated as written; restore the
    # upstream attribute assignments before use.
    def __init__( self :List[str] ) -> List[Any]:
        '''simple docstring'''
        # presumably: num_vertices = 0, num_edges = 0, adjacency = {}
        _a : Tuple =0
        _a : Any =0
        _a : int ={}

    def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :List[str] ) -> Optional[int]:
        # add_vertex: register a vertex with an empty neighbour dict.
        '''simple docstring'''
        if vertex not in self.adjacency:
            _a : Dict ={}
            self.num_vertices += 1

    def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Any ) -> List[str]:
        # add_edge: undirected — intended to store weight under both endpoints.
        '''simple docstring'''
        self.add_vertex(SCREAMING_SNAKE_CASE )
        self.add_vertex(SCREAMING_SNAKE_CASE )
        if head == tail:
            return
        _a : Any =weight
        _a : Tuple =weight

    def __UpperCAmelCase ( self :Dict ) -> Optional[int]:
        # distinct_weight: bump duplicate edge weights so all are unique.
        '''simple docstring'''
        _a : Union[str, Any] =self.get_edges()
        for edge in edges:
            _a , _a , _a : List[str] =edge
            edges.remove((tail, head, weight) )
        for i in range(len(SCREAMING_SNAKE_CASE ) ):
            _a : str =list(edges[i] )
        edges.sort(key=lambda SCREAMING_SNAKE_CASE : e[2] )
        for i in range(len(SCREAMING_SNAKE_CASE ) - 1 ):
            if edges[i][2] >= edges[i + 1][2]:
                _a : Union[str, Any] =edges[i][2] + 1
        for edge in edges:
            _a , _a , _a : Tuple =edge
            _a : Tuple =weight
            _a : List[Any] =weight

    def __str__( self :int ) -> str:
        # Render "head -> tail == weight" per stored edge.
        '''simple docstring'''
        _a : int =""""""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                _a : str =self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("""\n""" )

    def __UpperCAmelCase ( self :Optional[int] ) -> Optional[Any]:
        # get_edges: list of (tail, head, weight) triples.
        '''simple docstring'''
        _a : Union[str, Any] =[]
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]) )
        return output

    def __UpperCAmelCase ( self :List[Any] ) -> List[Any]:
        # get_vertices
        '''simple docstring'''
        return self.adjacency.keys()

    @staticmethod
    def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :Dict=None , SCREAMING_SNAKE_CASE :List[Any]=None ) -> Optional[int]:
        # build: construct a Graph from vertex and (head, tail, weight) lists.
        '''simple docstring'''
        _a : str =Graph()
        if vertices is None:
            _a : Union[str, Any] =[]
        if edges is None:
            _a : List[Any] =[]
        for vertex in vertices:
            g.add_vertex(SCREAMING_SNAKE_CASE )
        for edge in edges:
            g.add_edge(*SCREAMING_SNAKE_CASE )
        return g
class A__ :
    # NOTE(review): obfuscated union-find plus Borůvka MST driver (presumably
    # upstream's nested ``Graph.UnionFind`` and ``Graph.boruvka_mst`` —
    # verify). The obfuscation collapsed distinct locals into one name (e.g.
    # the two roots in ``union`` are both ``roota``, so ``roota == roota`` is
    # always true), and the static method below references
    # ``Graph.UnionFind()`` / ``Graph.build`` which do not exist under the
    # renamed classes. Restore upstream identifiers before use.
    def __init__( self :Optional[int] ) -> Union[str, Any]:
        '''simple docstring'''
        # presumably: parent = {}, rank = {}
        _a : Optional[int] ={}
        _a : List[str] ={}

    def __len__( self :List[Any] ) -> List[Any]:
        '''simple docstring'''
        return len(self.parent )

    def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :Tuple ) -> Dict:
        # make_set: register item as its own root with rank 0.
        '''simple docstring'''
        if item in self.parent:
            return self.find(SCREAMING_SNAKE_CASE )
        _a : Optional[Any] =item
        _a : List[str] =0
        return item

    def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Dict ) -> List[str]:
        # find with path compression.
        '''simple docstring'''
        if item not in self.parent:
            return self.make_set(SCREAMING_SNAKE_CASE )
        if item != self.parent[item]:
            _a : str =self.find(self.parent[item] )
        return self.parent[item]

    def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :List[Any] ) -> Optional[Any]:
        # union by rank; returns the new root.
        '''simple docstring'''
        _a : Optional[int] =self.find(SCREAMING_SNAKE_CASE )
        _a : Dict =self.find(SCREAMING_SNAKE_CASE )
        if roota == roota:
            return roota
        if self.rank[roota] > self.rank[roota]:
            _a : Any =roota
            return roota
        if self.rank[roota] < self.rank[roota]:
            _a : List[str] =roota
            return roota
        if self.rank[roota] == self.rank[roota]:
            self.rank[roota] += 1
            _a : List[Any] =roota
            return roota
        return None

    @staticmethod
    def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :Dict ) -> Union[str, Any]:
        # boruvka_mst: repeatedly pick each component's cheapest outgoing edge.
        '''simple docstring'''
        _a : Any =graph.num_vertices
        _a : Union[str, Any] =Graph.UnionFind()
        _a : Optional[int] =[]
        while num_components > 1:
            _a : str ={}
            for vertex in graph.get_vertices():
                _a : List[str] =-1
            _a : Any =graph.get_edges()
            for edge in edges:
                _a , _a , _a : Tuple =edge
                edges.remove((tail, head, weight) )
            for edge in edges:
                _a , _a , _a : Any =edge
                _a : Any =union_find.find(SCREAMING_SNAKE_CASE )
                _a : List[Any] =union_find.find(SCREAMING_SNAKE_CASE )
                if seta != seta:
                    if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
                        _a : Optional[int] =[head, tail, weight]
                    if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
                        _a : List[Any] =[head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    _a , _a , _a : Optional[Any] =cheap_edge[vertex]
                    if union_find.find(SCREAMING_SNAKE_CASE ) != union_find.find(SCREAMING_SNAKE_CASE ):
                        union_find.union(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
                        mst_edges.append(cheap_edge[vertex] )
            _a : str =num_components - 1
        _a : str =Graph.build(edges=SCREAMING_SNAKE_CASE )
        return mst
| 276 | 0 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCamelCase (UpperCAmelCase__ , unittest.TestCase ):
    # NOTE(review): obfuscated OpenAI-GPT tokenizer test suite. All test
    # methods share the name ``snake_case_`` and clobber one another, and the
    # four ``_lowercase`` class attributes overwrite each other (upstream they
    # are distinct: tokenizer_class, rust_tokenizer_class, and two flags).
    # Restore upstream identifiers (setUp, get_input_output_texts,
    # test_full_tokenizer, test_padding_if_pad_token_set_slow, ...) before use.
    _lowercase = OpenAIGPTTokenizer
    _lowercase = OpenAIGPTTokenizerFast
    _lowercase = True
    _lowercase = False

    def snake_case_ ( self: str ):
        # setUp: write a tiny BPE vocab + merges file into the temp dir.
        '''simple docstring'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        __UpperCamelCase = [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """w</w>""",
            """r</w>""",
            """t</w>""",
            """lo""",
            """low""",
            """er</w>""",
            """low</w>""",
            """lowest</w>""",
            """newer</w>""",
            """wider</w>""",
            """<unk>""",
        ]
        __UpperCamelCase = dict(zip(A_,range(len(A_ ) ) ) )
        __UpperCamelCase = ["""#version: 0.2""", """l o""", """lo w""", """e r</w>""", """"""]
        __UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] )
        __UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file,'w' ) as fp:
            fp.write(json.dumps(A_ ) )
        with open(self.merges_file,'w' ) as fp:
            fp.write('\n'.join(A_ ) )

    def snake_case_ ( self: Optional[Any],A_: str ):
        # get_input_output_texts
        '''simple docstring'''
        return "lower newer", "lower newer"

    def snake_case_ ( self: Dict ):
        # test_full_tokenizer: tokenize "lower" and check token/id round trip.
        '''simple docstring'''
        __UpperCamelCase = OpenAIGPTTokenizer(self.vocab_file,self.merges_file )
        __UpperCamelCase = """lower"""
        __UpperCamelCase = ["""low""", """er</w>"""]
        __UpperCamelCase = tokenizer.tokenize(A_ )
        self.assertListEqual(A_,A_ )
        __UpperCamelCase = tokens + ["""<unk>"""]
        __UpperCamelCase = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ),A_ )

    def snake_case_ ( self: Optional[Any],A_: int=15 ):
        # padding tests: OpenAI-GPT has no pad token, so padding must raise.
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(A_,**A_ )
                # Simple input
                __UpperCamelCase = """This is a simple input"""
                __UpperCamelCase = ["""This is a simple input 1""", """This is a simple input 2"""]
                __UpperCamelCase = ("""This is a simple input""", """This is a pair""")
                __UpperCamelCase = [
                    ("""This is a simple input 1""", """This is a simple input 2"""),
                    ("""This is a simple pair 1""", """This is a simple pair 2"""),
                ]
                # Simple input tests
                self.assertRaises(A_,tokenizer_r.encode,A_,max_length=A_,padding='max_length' )
                # Simple input
                self.assertRaises(A_,tokenizer_r.encode_plus,A_,max_length=A_,padding='max_length' )
                # Simple input
                self.assertRaises(
                    A_,tokenizer_r.batch_encode_plus,A_,max_length=A_,padding='max_length',)
                # Pair input
                self.assertRaises(A_,tokenizer_r.encode,A_,max_length=A_,padding='max_length' )
                # Pair input
                self.assertRaises(A_,tokenizer_r.encode_plus,A_,max_length=A_,padding='max_length' )
                # Pair input
                self.assertRaises(
                    A_,tokenizer_r.batch_encode_plus,A_,max_length=A_,padding='max_length',)

    def snake_case_ ( self: List[Any] ):
        # intentionally a no-op override of an inherited test
        '''simple docstring'''
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class __lowerCamelCase (UpperCAmelCase__ ):
    # Empty subclass: presumably re-runs the inherited tokenizer test suite
    # with ftfy and spacy installed (upstream:
    # OpenAIGPTTokenizationTestWithSpacy) — verify the base-class name.
    pass
| 310 |
"""Download a web page's Open Graph preview image (``og:image``) to disk."""
from datetime import datetime

import requests
# fix: was ``from bsa import BeautifulSoup`` — the BeautifulSoup 4 package
# is installed as ``bs4``.
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input('''Enter image url: ''').strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, '''html.parser''')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
    image_data = requests.get(image_url).content
    # Timestamped filename so repeated downloads never overwrite each other.
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, '''wb''') as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
| 276 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import scaffolding for the ALBERT model package (the usual
# transformers ``__init__`` pattern: build an import-structure dict guarded by
# optional-dependency checks, then hand it to ``_LazyModule``).
# NOTE(review): the obfuscation collapsed every ``_import_structure[...]``
# update into rebindings of the single name ``UpperCamelCase`` (each clobbers
# the last), and the final ``_LazyModule`` call references
# ``_import_structure``, which is never defined here — restore the upstream
# dict updates before this module can import.
UpperCamelCase : str = {
    '''configuration_albert''': ['''ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''AlbertConfig''', '''AlbertOnnxConfig'''],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase : Tuple = ['''AlbertTokenizer''']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase : Tuple = ['''AlbertTokenizerFast''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase : Optional[int] = [
        '''ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''AlbertForMaskedLM''',
        '''AlbertForMultipleChoice''',
        '''AlbertForPreTraining''',
        '''AlbertForQuestionAnswering''',
        '''AlbertForSequenceClassification''',
        '''AlbertForTokenClassification''',
        '''AlbertModel''',
        '''AlbertPreTrainedModel''',
        '''load_tf_weights_in_albert''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase : Union[str, Any] = [
        '''TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFAlbertForMaskedLM''',
        '''TFAlbertForMultipleChoice''',
        '''TFAlbertForPreTraining''',
        '''TFAlbertForQuestionAnswering''',
        '''TFAlbertForSequenceClassification''',
        '''TFAlbertForTokenClassification''',
        '''TFAlbertMainLayer''',
        '''TFAlbertModel''',
        '''TFAlbertPreTrainedModel''',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase : Optional[int] = [
        '''FlaxAlbertForMaskedLM''',
        '''FlaxAlbertForMultipleChoice''',
        '''FlaxAlbertForPreTraining''',
        '''FlaxAlbertForQuestionAnswering''',
        '''FlaxAlbertForSequenceClassification''',
        '''FlaxAlbertForTokenClassification''',
        '''FlaxAlbertModel''',
        '''FlaxAlbertPreTrainedModel''',
    ]

# Under static type checking, import everything eagerly so checkers see the
# real symbols; at runtime the lazy module below resolves them on demand.
if TYPE_CHECKING:
    from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_albert import AlbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_albert_fast import AlbertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_albert import (
            ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForPreTraining,
            AlbertForQuestionAnswering,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertModel,
            AlbertPreTrainedModel,
            load_tf_weights_in_albert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_albert import (
            TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAlbertForMaskedLM,
            TFAlbertForMultipleChoice,
            TFAlbertForPreTraining,
            TFAlbertForQuestionAnswering,
            TFAlbertForSequenceClassification,
            TFAlbertForTokenClassification,
            TFAlbertMainLayer,
            TFAlbertModel,
            TFAlbertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_albert import (
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForPreTraining,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertModel,
            FlaxAlbertPreTrainedModel,
        )

else:
    import sys

    UpperCamelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 316 |
'''simple docstring'''

# Code injected as the first cell of generated documentation notebooks
# (Italian translation of the standard Transformers install snippet).
# Defined under a real name so the list literal below can reference it;
# the original bound everything to A__, leaving INSTALL_CONTENT undefined
# (NameError at import time). The unimported Tuple/Any annotations are
# dropped for the same reason.
INSTALL_CONTENT = '''
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
A__ = INSTALL_CONTENT
# First cells prepended to every generated notebook.
A__ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
# Placeholder -> dummy-class substitutions applied to doc examples before
# formatting; A__ ends up bound to this dict, matching the original's final
# assignment.
A__ = {
    '''{processor_class}''': '''FakeProcessorClass''',
    '''{model_class}''': '''FakeModelClass''',
    '''{object_class}''': '''FakeObjectClass''',
}
| 276 | 0 |
import random


def _a(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph over ``vertices_number`` nodes.

    Each possible edge is included independently with the given
    ``probability`` (Erdos-Renyi style). The graph is returned as an
    adjacency-list dict ``{node: [neighbors]}``.

    Fixes over the original: the signature repeated one parameter name
    three times (a SyntaxError), the body referenced an undefined name
    for the vertex count, and the probability >= 1 branch called an
    undefined ``complete_graph`` helper.

    :param vertices_number: number of nodes, labeled 0..vertices_number-1
    :param probability: chance in [0, 1] that any given edge exists
    :param directed: if False (default), every edge is mirrored j -> i
    :return: adjacency-list representation of the generated graph
    """
    graph: dict = {node: [] for node in range(vertices_number)}
    # Probability >= 1 means every edge exists: return a complete graph.
    if probability >= 1:
        return {
            node: [other for other in range(vertices_number) if other != node]
            for node in range(vertices_number)
        }
    # Probability <= 0 means no edge can exist: return the empty graph.
    if probability <= 0:
        return graph
    # For each unordered node pair, flip a biased coin to decide the edge.
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # Undirected graphs store the edge in both directions.
                    graph[j].append(i)
    return graph
def _a(UpperCamelCase_: int) -> dict:
    """Return a complete graph with ``UpperCamelCase_`` nodes.

    Every node is connected to every other node; the result is an
    adjacency-list dict ``{node: [all other nodes]}``. Fixes the original
    body, which referenced an undefined name instead of the parameter.

    :param UpperCamelCase_: number of nodes (name kept for caller
        compatibility with the obfuscated original)
    :return: adjacency-list representation of the complete graph
    """
    return {
        node: [other for other in range(UpperCamelCase_) if other != node]
        for node in range(UpperCamelCase_)
    }


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 340 |
'''simple docstring'''

# Doomsday day-of-month anchors per month (value mod 7 of the date that
# falls on the year's doomsday), for leap and non-leap years respectively.
# The original bound these to A__, so the function's lookups raised
# NameError; real names are introduced, with A__ kept as a trailing alias
# (its final value -- the weekday dict -- matches the original's last
# assignment). The unimported Optional/Any annotations are dropped.
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: '''Sunday''',
    1: '''Monday''',
    2: '''Tuesday''',
    3: '''Wednesday''',
    4: '''Thursday''',
    5: '''Friday''',
    6: '''Saturday''',
}
A__ = DOOMSDAY_LEAP
A__ = DOOMSDAY_NOT_LEAP
A__ = WEEK_DAY_NAMES


def SCREAMING_SNAKE_CASE_(year: int, month: int, day: int) -> str:
    """Return the weekday name of a date using Conway's Doomsday algorithm.

    Fixes over the original: the signature repeated one parameter name three
    times (a SyntaxError) while the body already used year/month/day, and the
    leap-year test compared ``year % 400 == 0`` where the century-year
    exception requires ``!= 0`` (1900 is NOT a leap year, 2000 is).

    :param year: four-digit year (YYYY)
    :param month: month number, 1..12
    :param day: day of month, 1..31
    :return: weekday name, e.g. ``"Tuesday"``
    :raises AssertionError: on out-of-range input (kept from the original
        contract; callers may rely on AssertionError)
    """
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Anchor day of the century (Tuesday for 2000s, Sunday for 1900s, ...).
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    # Doomsday of the specific year within its century ("odd+11" family).
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # A year is NOT leap when year % 4 != 0, or when it is a century year
    # (centurian == 0) not divisible by 400.
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 276 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.