"""Primality check using 6k +/- 1 trial division, with unit tests."""
import math
import unittest


def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1, so it suffices to
    # test divisors of that form up to sqrt(number).
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
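As a quick sanity check (not part of the original tests), the 6k +/- 1 routine can be cross-checked against naive trial division; the helper below is illustrative only.

def naive_is_prime(n: int) -> bool:
    # reference implementation: n is prime iff no d in [2, n) divides it
    return n >= 2 and all(n % d for d in range(2, n))

assert all(is_prime(n) == naive_is_prime(n) for n in range(2, 1000))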
"""Project Euler problem 8: find the thirteen adjacent digits in the 1000-digit number N that have the greatest product."""
import sys

N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in the digit string `n`."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
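The same computation can be written more compactly with math.prod over sliding 13-digit windows; this variant is a sketch added for comparison, not part of the original solution.

import math

def solution_compact(n: str = N) -> int:
    # product of each 13-digit window, then take the maximum
    return max(math.prod(int(d) for d in n[i : i + 13]) for i in range(len(n) - 12))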
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
    from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
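A minimal usage sketch, assuming the transformers agents/tools API with a local document image; the task string passed to load_tool and the file name are my assumptions, not something stated in this file.

from PIL import Image
from transformers import load_tool

tool = load_tool("document-question-answering")  # assumed task name
document = Image.open("invoice.png")             # hypothetical input document
print(tool(document, "What is the invoice number?"))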
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        """If you are training a seq2seq model that expects a decoder_prefix token, make sure it is prepended to decoder_input_ids."""
        pass
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
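For context, InitializationArguments comes from the codeparrot example's local arguments module; a plausible minimal shape of that dataclass is sketched below. The field names match the attributes used above, but the defaults are my assumptions.

from dataclasses import dataclass, field
from typing import Optional

@dataclass
class InitializationArguments:
    # assumed defaults, for illustration only
    config_name: Optional[str] = field(default="gpt2-large")
    tokenizer_name: Optional[str] = field(default="codeparrot/codeparrot")
    model_name: Optional[str] = field(default="codeparrot")
    push_to_hub: Optional[bool] = field(default=True)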
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
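The _LazyModule indirection keeps importing the package cheap: a submodule is only imported when one of its symbols is first accessed. A minimal sketch of the same idea using PEP 562 module-level __getattr__ (not the actual _LazyModule implementation) looks like this:

import importlib

_import_structure = {"configuration_informer": ["InformerConfig"]}

def __getattr__(name):
    # resolve the attribute lazily from the submodule that declares it
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module("." + module_name, __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")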
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # a single random channels-first image, converted to PIL
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)

    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path=None, config_path=None):
    """Copy/paste/tweak the original BLIP weights into the transformers design."""
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)

    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
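A typical invocation of the script, for reference; the script filename and output paths below are my assumptions rather than something stated in this file.

# python convert_blip_original_pytorch_to_hf.py --pytorch_dump_folder_path ./blip-export --config_path ./config.json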
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
    from ..models.auto import AutoTokenizer
class BaseStreamer:
    """Base class from which `.generate()` streamers should inherit."""

    def put(self, value):
        """Function that is called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Function that is called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    """Simple text streamer that prints the token(s) to stdout as soon as entire words are formed."""

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Receives tokens, decodes them, and prints them to stdout as soon as they form entire words."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flushes any remaining cache and signals the end of the stream."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True

        return False


class TextIteratorStreamer(TextStreamer):
    """Streamer that stores print-ready text in a queue, to be consumed by a downstream application as an iterator."""

    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
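The iterator variant is designed to be driven from a separate thread: generation pushes text into the queue while the main thread consumes it. A minimal usage sketch (the model choice is illustrative):

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # illustrative model choice
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer("An increasing sequence: one,", return_tensors="pt")

streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
# run generation in a background thread so the main thread can consume text as it arrives
thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20))
thread.start()
for new_text in streamer:
    print(new_text, end="")
thread.join()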
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """
    Find the index of `key` by scanning the list from both ends at once,
    recursively. Returns -1 if the key is not found.

    >>> search(list(range(0, 11)), 5)
    5
    >>> search([1, 2, 4, 5, 3], 4)
    2
    >>> search([1, 2, 4, 5, 3], 6)
    -1
    >>> search([5], 5)
    0
    >>> search([], 1)
    -1
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50_000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
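A quick illustration (not in the original file): the config behaves like any PretrainedConfig subclass, so defaults can be overridden at construction time.

config = RoFormerConfig(vocab_size=12_000, num_hidden_layers=6)
assert config.embedding_size == config.hidden_size  # embedding_size falls back to hidden_size
assert config.rotary_value is False                 # default from __init__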
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
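One design quirk worth noting: the pad rule always rounds up to the next multiple of `size`, even when a dimension is already aligned. A quick check of the arithmetic (illustrative only):

for old, size in [(250, 8), (256, 8)]:
    pad_amount = (old // size + 1) * size - old
    print(old, "->", old + pad_amount)  # 250 -> 256, 256 -> 264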
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
    import bitsandbytes as bnb

from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    module_name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, module_name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model

    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )

    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if we don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace the `nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" and ".bias" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
def has_4bit_bnb_layers(model):
    """Check whether the model contains `bnb.nn.Linear4bit` layers."""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
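A minimal usage sketch of these utilities through accelerate's public API; the model id and weights path are illustrative, and the exact names assume a recent accelerate/bitsandbytes install.

from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
from transformers import AutoConfig, AutoModelForCausalLM

# build the model skeleton on the meta device, then quantize while loading real weights
with init_empty_weights():
    empty_model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained("gpt2"))

bnb_config = BnbQuantizationConfig(load_in_8bit=True)
model = load_and_quantize_model(
    empty_model, bnb_config, weights_location="path/to/weights"  # hypothetical checkpoint folder
)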
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                num_hidden_layers=5,
                num_attention_heads=4,
                image_size=32,
                intermediate_size=37,
                patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }

        return components

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            input_image,
            "anime turtle",
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}
def get_pairs(word):
    """
    Return the set of symbol pairs in a word.

    Word is represented as a tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    return pairs
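A quick illustration of get_pairs (not in the original file): for the symbol tuple of a word, it yields every adjacent bigram, which is exactly what the BPE merge loop below ranks.

assert get_pairs(("l", "o", "w", "e", "r")) == {("l", "o"), ("o", "w"), ("w", "e"), ("e", "r")}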
class PhobertTokenizer(PreTrainedTokenizer):
    """Construct a PhoBERT tokenizer, based on Byte-Pair-Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3

        self.add_from_file(vocab_file)

        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
return out_string
def _snake_case (self , __magic_name__ , __magic_name__ = None ):
if not os.path.isdir(_A ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCamelCase__ : Any = os.path.join(
_A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCamelCase__ : List[Any] = os.path.join(
_A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ):
copyfile(self.vocab_file , _A )
if os.path.abspath(self.merges_file ) != os.path.abspath(_A ):
copyfile(self.merges_file , _A )
return out_vocab_file, out_merge_file
def _snake_case (self , __magic_name__ ):
if isinstance(_A , _A ):
try:
with open(_A , """r""" , encoding="""utf-8""" ) as fd:
self.add_from_file(_A )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset" )
return
lowerCamelCase__ : List[str] = f.readlines()
for lineTmp in lines:
lowerCamelCase__ : List[str] = lineTmp.strip()
lowerCamelCase__ : Optional[Any] = line.rfind(""" """ )
if idx == -1:
raise ValueError("""Incorrect dictionary format, expected \'<token> <cnt>\'""" )
lowerCamelCase__ : Optional[Any] = line[:idx]
lowerCamelCase__ : Optional[Any] = len(self.encoder )
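# Hedged, self-contained illustration of the adjacent-pair extraction that the BPE
# merge loop above depends on (clean names are mine; the get_pairs helper above is
# the obfuscated equivalent).
def _demo_get_pairs(word: tuple) -> set:
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


# e.g. _demo_get_pairs(("l", "o", "w</w>")) == {("l", "o"), ("o", "w</w>")}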
| 716 |
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat `key` until it is as long as `message`."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt: shift each letter backwards by its key letter (mod 26)."""
    encrypted = ""
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            encrypted += dict2[x]
    return encrypted


def original_text(cipher: str, key_new: str) -> str:
    """Decrypt: shift each letter forwards by its key letter (mod 26)."""
    or_txt = ""
    i = 0
    for letter in cipher:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
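# Hedged round-trip check: decryption undoes encryption on uppercase letters and
# spaces (uses only names defined in this file).
def _demo_round_trip() -> None:
    demo_key = generate_key("HELLO WORLD", "KEY")
    assert original_text(cipher_text("HELLO WORLD", demo_key), demo_key) == "HELLO WORLD"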
| 96 | 0 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
UpperCamelCase_ = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
UpperCamelCase_ = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
UpperCamelCase_ = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE( datasets.Metric ):
def __lowerCamelCase ( self : List[str] ) -> str:
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'] , reference_urls=[
'https://github.com/m-popovic/chrF',
] , )
def __lowerCamelCase ( self : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int = CHRF.CHAR_ORDER , UpperCamelCase_ : int = CHRF.WORD_ORDER , UpperCamelCase_ : int = CHRF.BETA , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False , ) -> int:
SCREAMING_SNAKE_CASE__ :Dict = len(references[0] )
if any(len(_SCREAMING_SNAKE_CASE ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
SCREAMING_SNAKE_CASE__ :Dict = [[refs[i] for refs in references] for i in range(_SCREAMING_SNAKE_CASE )]
SCREAMING_SNAKE_CASE__ :int = CHRF(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ :Dict = sb_chrf.corpus_score(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
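# Hedged helper (not part of the metric class): convert sacrebleu-style references
# (one list per reference set) into the per-prediction sub-lists this metric
# expects, as the transposition note in _DESCRIPTION above recommends.
def _transpose_references(refs_per_set):
    # [["ref1 of sent1", "ref1 of sent2"]] -> [["ref1 of sent1"], ["ref1 of sent2"]]
    return [list(per_sentence) for per_sentence in zip(*refs_per_set)]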
| 209 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
A_ : int = get_tests_dir('fixtures')
A_ : int = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
A_ : Dict = get_tests_dir('fixtures/dummy-config.json')
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = 0
def _lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : List[Any] = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
SCREAMING_SNAKE_CASE : Optional[Any] = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE ).to_dict()
config_dict.pop('feature_extractor_type' )
SCREAMING_SNAKE_CASE : int = WavaVecaFeatureExtractor(**_SCREAMING_SNAKE_CASE )
# save in new folder
model_config.save_pretrained(_SCREAMING_SNAKE_CASE )
config.save_pretrained(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[Any] = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
# make sure private variable is not incorrectly saved
SCREAMING_SNAKE_CASE : Optional[int] = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE , 'bert-base is not a local folder and is not a valid model identifier' ):
SCREAMING_SNAKE_CASE : str = AutoFeatureExtractor.from_pretrained('bert-base' )
def _lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE , revision='aaaaaa' )
def _lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
SCREAMING_SNAKE_CASE : int = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def _lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE : int = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE , trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
def _lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
try:
AutoConfig.register('custom' , _SCREAMING_SNAKE_CASE )
AutoFeatureExtractor.register(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
AutoFeatureExtractor.register(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE : Union[str, Any] = CustomFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Dict = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def _lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
class lowerCAmelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : str = True
try:
AutoConfig.register('custom' , _SCREAMING_SNAKE_CASE )
AutoFeatureExtractor.register(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE : str = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE : List[Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(not hasattr(_SCREAMING_SNAKE_CASE , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
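# Hedged, self-contained sketch of the registration round-trip the tests above
# exercise; CustomConfig and CustomFeatureExtractor come from the fixture imports
# at the top of this file, and the cleanup mirrors the tests' finally blocks.
def _demo_register_round_trip():
    try:
        AutoConfig.register("custom", CustomConfig)
        AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
        feature_extractor = CustomFeatureExtractor()  # default args, illustrative only
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            return AutoFeatureExtractor.from_pretrained(tmp_dir)  # -> CustomFeatureExtractor
    finally:
        CONFIG_MAPPING._extra_content.pop("custom", None)
        FEATURE_EXTRACTOR_MAPPING._extra_content.pop(CustomConfig, None)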
| 265 | 0 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' ,'False')) is not True ,reason='Skipping test because should only be run when releasing minor transformers version' ,)
@pytest.mark.usefixtures('sm_env')
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_50, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_00, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_00, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
])
class __snake_case ( unittest.TestCase):
"""simple docstring"""
def __lowercase ( self : Union[str, Any] ) -> Dict:
if self.framework == "pytorch":
subprocess.run(
F'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding="""utf-8""" , check=lowerCamelCase , )
assert hasattr(self , """env""" )
def __lowercase ( self : int , lowerCamelCase : str ) -> List[Any]:
lowerCAmelCase_ : List[str] = F'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'
# distributed data settings
lowerCAmelCase_ : List[str] = {"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=lowerCamelCase , instance_count=lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=lowerCamelCase , hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=lowerCamelCase , py_version="""py36""" , )
def __lowercase ( self : Dict , lowerCamelCase : List[Any] ) -> Optional[int]:
TrainingJobAnalytics(lowerCamelCase ).export_csv(F'{self.env.test_path}/{job_name}_metrics.csv' )
@parameterized.expand([(2,)] )
def __lowercase ( self : Dict , lowerCamelCase : Optional[Any] ) -> str:
# create estimator
lowerCAmelCase_ : Optional[Any] = self.create_estimator(lowerCamelCase )
# run training
estimator.fit()
# result dataframe
lowerCAmelCase_ : List[str] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCAmelCase_ : Any = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
lowerCAmelCase_ : Union[str, Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from the SageMaker job; this includes starting, preprocessing, and stopping
lowerCAmelCase_ : Union[str, Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump test results into a json file to share in the PR
with open(F'{estimator.latest_training_job.name}.json' , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , lowerCamelCase )
| 398 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__A : Tuple = logging.get_logger(__name__)
__A : Optional[Any] = "▁"
__A : Tuple = {"vocab_file": "sentencepiece.bpe.model"}
__A : Tuple = {
"vocab_file": {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
}
}
__A : int = {
"facebook/xglm-564M": 2048,
}
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
def __init__( self : Any , lowerCamelCase : Any , lowerCamelCase : str="<s>" , lowerCamelCase : Optional[int]="</s>" , lowerCamelCase : Optional[Any]="</s>" , lowerCamelCase : List[Any]="<s>" , lowerCamelCase : Optional[Any]="<unk>" , lowerCamelCase : int="<pad>" , lowerCamelCase : Optional[Dict[str, Any]] = None , **lowerCamelCase : Optional[Any] , ) -> None:
lowerCAmelCase_ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
lowerCAmelCase_ : str = 7
lowerCAmelCase_ : Any = [F'<madeupword{i}>' for i in range(self.num_madeup_words )]
lowerCAmelCase_ : Optional[Any] = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , )
lowerCAmelCase_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase ) )
lowerCAmelCase_ : int = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCAmelCase_ : List[str] = 1
# Mimic fairseq token-to-id alignment for the first 4 tokens
lowerCAmelCase_ : Any = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
lowerCAmelCase_ : Union[str, Any] = len(self.sp_model )
lowerCAmelCase_ : Any = {F'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(lowerCamelCase )
lowerCAmelCase_ : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : int ) -> Union[str, Any]:
lowerCAmelCase_ : Union[str, Any] = self.__dict__.copy()
lowerCAmelCase_ : str = None
lowerCAmelCase_ : List[str] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Dict , lowerCamelCase : List[Any] ) -> List[Any]:
lowerCAmelCase_ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCAmelCase_ : int = {}
lowerCAmelCase_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowercase ( self : List[Any] , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
lowerCAmelCase_ : List[Any] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def __lowercase ( self : Tuple , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None , lowerCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase ))
return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase ))
def __lowercase ( self : str , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
lowerCAmelCase_ : Dict = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def __lowercase ( self : str ) -> Union[str, Any]:
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def __lowercase ( self : Optional[Any] ) -> Dict:
lowerCAmelCase_ : Optional[Any] = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowercase ( self : int , lowerCamelCase : str ) -> List[str]:
return self.sp_model.encode(lowerCamelCase , out_type=lowerCamelCase )
def __lowercase ( self : int , lowerCamelCase : Dict ) -> Any:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCAmelCase_ : int = self.sp_model.PieceToId(lowerCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowercase ( self : Dict , lowerCamelCase : Optional[int] ) -> Any:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowercase ( self : List[str] , lowerCamelCase : Optional[Any] ) -> Optional[Any]:
lowerCAmelCase_ : str = """""".join(lowerCamelCase ).replace(lowerCamelCase , """ """ ).strip()
return out_string
def __lowercase ( self : Optional[Any] , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase_ : List[str] = os.path.join(
lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase , """wb""" ) as fi:
lowerCAmelCase_ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
return (out_vocab_file,)
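# Hedged illustration of the fairseq/spm alignment table documented above: each
# SentencePiece id is shifted by fairseq_offset (1) so ids 0-3 stay reserved for
# <s>/<pad>/</s>/<unk>, and spm id 0 (<unk>) maps to fairseq id 3.
def _demo_fairseq_offset(spm_id: int, fairseq_offset: int = 1) -> int:
    return spm_id + fairseq_offset if spm_id else 3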
| 398 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class _UpperCAmelCase ( lowercase ):
lowerCamelCase_ : Union[str, Any] = """trajectory_transformer"""
lowerCamelCase_ : Tuple = ["""past_key_values"""]
lowerCamelCase_ : Union[str, Any] = {
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : List[str] , UpperCAmelCase : List[Any]=1_00 , UpperCAmelCase : int=5 , UpperCAmelCase : Union[str, Any]=1 , UpperCAmelCase : Any=1 , UpperCAmelCase : int=2_49 , UpperCAmelCase : Dict=6 , UpperCAmelCase : List[str]=17 , UpperCAmelCase : str=25 , UpperCAmelCase : Union[str, Any]=4 , UpperCAmelCase : Any=4 , UpperCAmelCase : str=1_28 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : List[Any]=0.0006 , UpperCAmelCase : List[str]=5_12 , UpperCAmelCase : Optional[Any]=0.02 , UpperCAmelCase : Dict=1E-12 , UpperCAmelCase : str=1 , UpperCAmelCase : Dict=True , UpperCAmelCase : Tuple=1 , UpperCAmelCase : str=5_02_56 , UpperCAmelCase : Tuple=5_02_56 , **UpperCAmelCase : Union[str, Any] , ):
SCREAMING_SNAKE_CASE_ :Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE_ :Optional[int] = action_weight
SCREAMING_SNAKE_CASE_ :int = reward_weight
SCREAMING_SNAKE_CASE_ :int = value_weight
SCREAMING_SNAKE_CASE_ :Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE_ :List[Any] = block_size
SCREAMING_SNAKE_CASE_ :str = action_dim
SCREAMING_SNAKE_CASE_ :Optional[int] = observation_dim
SCREAMING_SNAKE_CASE_ :Any = transition_dim
SCREAMING_SNAKE_CASE_ :List[Any] = learning_rate
SCREAMING_SNAKE_CASE_ :int = n_layer
SCREAMING_SNAKE_CASE_ :Tuple = n_head
SCREAMING_SNAKE_CASE_ :str = n_embd
SCREAMING_SNAKE_CASE_ :List[str] = embd_pdrop
SCREAMING_SNAKE_CASE_ :int = attn_pdrop
SCREAMING_SNAKE_CASE_ :str = resid_pdrop
SCREAMING_SNAKE_CASE_ :Dict = initializer_range
SCREAMING_SNAKE_CASE_ :Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE_ :Any = kaiming_initializer_range
SCREAMING_SNAKE_CASE_ :List[str] = use_cache
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase)
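# Hedged usage sketch (the public name of the class above is
# TrajectoryTransformerConfig; the model family is deprecated but the config
# remains constructible):
#   cfg = TrajectoryTransformerConfig(n_layer=4)
#   assert cfg.num_hidden_layers == 4   # routed through attribute_map above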
| 631 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
class _UpperCAmelCase ( lowercase ):
def __init__( self : Optional[int] , UpperCAmelCase : Any=-1):
# in NER datasets, the last column is usually reserved for the NER label
SCREAMING_SNAKE_CASE_ :Tuple = label_idx
def _snake_case ( self : Tuple , UpperCAmelCase : Any , UpperCAmelCase : Union[Split, str]):
if isinstance(UpperCAmelCase , UpperCAmelCase):
SCREAMING_SNAKE_CASE_ :List[Any] = mode.value
SCREAMING_SNAKE_CASE_ :Optional[Any] = os.path.join(UpperCAmelCase , F"{mode}.txt")
SCREAMING_SNAKE_CASE_ :Tuple = 1
SCREAMING_SNAKE_CASE_ :str = []
with open(UpperCAmelCase , encoding="utf-8") as f:
SCREAMING_SNAKE_CASE_ :Tuple = []
SCREAMING_SNAKE_CASE_ :int = []
for line in f:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=UpperCAmelCase , labels=UpperCAmelCase))
guid_index += 1
SCREAMING_SNAKE_CASE_ :Tuple = []
SCREAMING_SNAKE_CASE_ :Any = []
else:
SCREAMING_SNAKE_CASE_ :int = line.split(" ")
words.append(splits[0])
if len(UpperCAmelCase) > 1:
labels.append(splits[self.label_idx].replace("\n" , ""))
else:
# Examples could have no label for mode = "test"
labels.append("O")
if words:
examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=UpperCAmelCase , labels=UpperCAmelCase))
return examples
def _snake_case ( self : List[Any] , UpperCAmelCase : TextIO , UpperCAmelCase : TextIO , UpperCAmelCase : List):
SCREAMING_SNAKE_CASE_ :Union[str, Any] = 0
for line in test_input_reader:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
writer.write(UpperCAmelCase)
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
SCREAMING_SNAKE_CASE_ :Union[str, Any] = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
writer.write(UpperCAmelCase)
else:
logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0])
def _snake_case ( self : List[str] , UpperCAmelCase : str):
if path:
with open(UpperCAmelCase , "r") as f:
SCREAMING_SNAKE_CASE_ :Any = f.read().splitlines()
if "O" not in labels:
SCREAMING_SNAKE_CASE_ :Union[str, Any] = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class _UpperCAmelCase ( lowercase ):
def __init__( self : Dict):
# in the CoNLL-2003 dataset, the chunk column is second-to-last
super().__init__(label_idx=-2)
def _snake_case ( self : Union[str, Any] , UpperCAmelCase : str):
if path:
with open(UpperCAmelCase , "r") as f:
SCREAMING_SNAKE_CASE_ :Optional[int] = f.read().splitlines()
if "O" not in labels:
SCREAMING_SNAKE_CASE_ :Dict = ["O"] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class _UpperCAmelCase ( lowercase ):
def _snake_case ( self : int , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[Split, str]):
if isinstance(UpperCAmelCase , UpperCAmelCase):
SCREAMING_SNAKE_CASE_ :List[str] = mode.value
SCREAMING_SNAKE_CASE_ :List[str] = os.path.join(UpperCAmelCase , F"{mode}.txt")
SCREAMING_SNAKE_CASE_ :Dict = 1
SCREAMING_SNAKE_CASE_ :List[str] = []
with open(UpperCAmelCase , encoding="utf-8") as f:
for sentence in parse_incr(UpperCAmelCase):
SCREAMING_SNAKE_CASE_ :List[str] = []
SCREAMING_SNAKE_CASE_ :int = []
for token in sentence:
words.append(token["form"])
labels.append(token["upos"])
assert len(UpperCAmelCase) == len(UpperCAmelCase)
if words:
examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=UpperCAmelCase , labels=UpperCAmelCase))
guid_index += 1
return examples
def _snake_case ( self : List[Any] , UpperCAmelCase : TextIO , UpperCAmelCase : TextIO , UpperCAmelCase : List):
SCREAMING_SNAKE_CASE_ :List[str] = 0
for sentence in parse_incr(UpperCAmelCase):
SCREAMING_SNAKE_CASE_ :str = preds_list[example_id]
SCREAMING_SNAKE_CASE_ :List[Any] = ""
for token in sentence:
out += F"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
out += "\n"
writer.write(UpperCAmelCase)
example_id += 1
def _snake_case ( self : Tuple , UpperCAmelCase : str):
if path:
with open(UpperCAmelCase , "r") as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
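# For reference, a hedged sample of the whitespace-separated CoNLL-style input the
# NER reader above expects (token first, then tag columns; the NER label sits in
# the column selected by label_idx):
_SAMPLE_CONLL = """-DOCSTART- -X- -X- O

EU NNP B-NP B-ORG
rejects VBZ B-VP O
German JJ B-NP B-MISC
"""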
| 631 | 1 |
def is_ip_va_address_valid(ip_va_address: str) -> bool:
    """Return True if the string is a dotted-quad IPv4 address with octets in 0-255."""
    octets = [int(i) for i in ip_va_address.split(".") if i.isdigit()]
    # 255 is a valid octet (e.g. 255.255.255.255); an upper bound of 254 would wrongly reject it.
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)
if __name__ == "__main__":
lowerCamelCase__ = input().strip()
lowerCamelCase__ = 'valid' if is_ip_va_address_valid(ip) else 'invalid'
print(f'''{ip} is a {valid_or_invalid} IP v4 address.''')
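# Hedged spot checks for the validator above; call manually or from a test runner.
def _demo_checks() -> None:
    assert is_ip_va_address_valid("192.168.0.23")
    assert not is_ip_va_address_valid("256.1.2.3")  # octet out of range
    assert not is_ip_va_address_valid("192.168.0")  # only three octets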
| 700 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase__ = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
lowerCamelCase__ = {
'yjernite/retribert-base-uncased': 5_12,
}
lowerCamelCase__ = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class _lowerCAmelCase ( __A ):
'''simple docstring'''
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = PRETRAINED_INIT_CONFIGURATION
snake_case_ = RetriBertTokenizer
snake_case_ = ['input_ids', 'attention_mask']
def __init__( self : str , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : str=None , UpperCamelCase_ : Tuple=True , UpperCamelCase_ : str="[UNK]" , UpperCamelCase_ : Optional[int]="[SEP]" , UpperCamelCase_ : Union[str, Any]="[PAD]" , UpperCamelCase_ : List[Any]="[CLS]" , UpperCamelCase_ : int="[MASK]" , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Tuple=None , **UpperCamelCase_ : List[Any] , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , )
_lowercase : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , UpperCamelCase_ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , UpperCamelCase_ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , UpperCamelCase_ ) != tokenize_chinese_chars
):
_lowercase : Any = getattr(UpperCamelCase_ , normalizer_state.pop('''type''' ) )
_lowercase : Any = do_lower_case
_lowercase : List[Any] = strip_accents
_lowercase : Union[str, Any] = tokenize_chinese_chars
_lowercase : Optional[int] = normalizer_class(**UpperCamelCase_ )
_lowercase : str = do_lower_case
def __lowercase ( self : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str=None ) -> Any:
'''simple docstring'''
_lowercase : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowercase ( self : int , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_lowercase : List[str] = [self.sep_token_id]
_lowercase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowercase ( self : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
_lowercase : str = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
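# Hedged illustration of the mask produced by create_token_type_ids_from_sequences
# above for a sentence pair:
#   [CLS] a1 a2 [SEP] b1 b2 [SEP]
#     0   0  0   0    1  1   1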
| 411 | 0 |
'''simple docstring'''
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    """Build a random array of ten ints and a random target sum."""
    arr = [randint(-1000, 1000) for _ in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: try every ordered triple of elements, O(n^3)."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Sort once, then close in with two pointers from both ends, O(n^2)."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    """Time both implementations on the shared random dataset."""
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))
if __name__ == "__main__":
from doctest import testmod
testmod()
__UpperCamelCase = solution_times()
print(f"""The time for naive implementation is {times[0]}.""")
print(f"""The time for optimized implementation is {times[1]}.""")
| 26 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase : int = {
"""configuration_owlvit""": [
"""OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""OwlViTConfig""",
"""OwlViTOnnxConfig""",
"""OwlViTTextConfig""",
"""OwlViTVisionConfig""",
],
"""processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict = ["""OwlViTFeatureExtractor"""]
lowerCAmelCase : int = ["""OwlViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] = [
"""OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OwlViTModel""",
"""OwlViTPreTrainedModel""",
"""OwlViTTextModel""",
"""OwlViTVisionModel""",
"""OwlViTForObjectDetection""",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
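# Hedged note on the lazy-module pattern above: importing the package stays cheap,
# and heavy backends load only when a symbol is first resolved, e.g.
#   from transformers import OwlViTConfig   # no torch required
#   from transformers import OwlViTModel    # pulls in torch on first resolution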
| 444 | 0 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset, change the target column index here
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64))  # input_shape is inferred from the previous layer
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
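    # Hedged follow-up (illustrative): pred is still in the scaler's [0, 1] range.
    # To report price units, keep a handle on the fitted scaler instead of
    # discarding it above, e.g.:
    #   scaler = MinMaxScaler()
    #   actual_data = scaler.fit_transform(actual_data)
    #   ...
    #   predicted_prices = scaler.inverse_transform(pred.reshape(-1, 1))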
| 644 |
'''simple docstring'''
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    """Midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])
    if depth == 0:
        return
    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"Correct format for using this script: "
"python fractals.py <int:depth_for_fractal>"
)
snake_case_ : Any = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("red")
snake_case_ : Tuple = [(-1_75, -1_25), (0, 1_75), (1_75, -1_25)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
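# Hedged note: depth d draws 3**d smallest triangles, e.g. `python fractals.py 4`
# renders 81 of them; depths much beyond 7 become slow with turtle graphics.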
| 644 | 1 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __a :
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str]=13 , SCREAMING_SNAKE_CASE : Optional[Any]=32 , SCREAMING_SNAKE_CASE : Optional[int]=3 , SCREAMING_SNAKE_CASE : List[Any]=4 , SCREAMING_SNAKE_CASE : str=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE : Any=[2, 2, 3, 2] , SCREAMING_SNAKE_CASE : Any=True , SCREAMING_SNAKE_CASE : List[Any]=True , SCREAMING_SNAKE_CASE : Optional[Any]=37 , SCREAMING_SNAKE_CASE : int="gelu" , SCREAMING_SNAKE_CASE : List[str]=10 , SCREAMING_SNAKE_CASE : List[str]=0.0_2 , SCREAMING_SNAKE_CASE : Tuple=["stage2", "stage3", "stage4"] , SCREAMING_SNAKE_CASE : str=[2, 3, 4] , SCREAMING_SNAKE_CASE : Union[str, Any]=None , ):
'''simple docstring'''
UpperCamelCase__ : List[str] = parent
UpperCamelCase__ : Any = batch_size
UpperCamelCase__ : Optional[Any] = image_size
UpperCamelCase__ : Union[str, Any] = num_channels
UpperCamelCase__ : Tuple = num_stages
UpperCamelCase__ : List[str] = hidden_sizes
UpperCamelCase__ : Dict = depths
UpperCamelCase__ : Any = is_training
UpperCamelCase__ : List[str] = use_labels
UpperCamelCase__ : Union[str, Any] = intermediate_size
UpperCamelCase__ : int = hidden_act
UpperCamelCase__ : str = num_labels
UpperCamelCase__ : Union[str, Any] = initializer_range
UpperCamelCase__ : Optional[int] = out_features
UpperCamelCase__ : Union[str, Any] = out_indices
UpperCamelCase__ : Optional[int] = scope
def __lowercase ( self : str ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ : List[Any] = None
if self.use_labels:
UpperCamelCase__ : Any = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Dict ):
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def __lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = ConvNextVaModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase__ : str = model(SCREAMING_SNAKE_CASE )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __lowercase ( self : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
UpperCamelCase__ : Tuple = ConvNextVaForImageClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase__ : Dict = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self : int , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
UpperCamelCase__ : Tuple = ConvNextVaBackbone(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase__ : Tuple = model(SCREAMING_SNAKE_CASE )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCamelCase__ : Tuple = None
UpperCamelCase__ : str = ConvNextVaBackbone(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase__ : Optional[int] = model(SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Optional[int] = config_and_inputs
UpperCamelCase__ : List[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCamelCase__ : Tuple = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = config_and_inputs
UpperCamelCase__ : Optional[Any] = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
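# Hedged sketch of the backbone fallback checked in create_and_check_backbone
# above: with both out_features and out_indices unset, the backbone exposes only
# the last stage.
#   cfg = ConvNextVaConfig(out_features=None, out_indices=None)
#   backbone = ConvNextVaBackbone(cfg)
#   assert len(backbone.channels) == 1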
@require_torch
class __a ( _a , _a , unittest.TestCase ):
_lowerCAmelCase : Optional[Any] = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
_lowerCAmelCase : str = (
{'''feature-extraction''': ConvNextVaModel, '''image-classification''': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
_lowerCAmelCase : List[Any] = False
_lowerCAmelCase : str = False
_lowerCAmelCase : Optional[int] = False
_lowerCAmelCase : Any = False
_lowerCAmelCase : Any = False
def setUp(self):
self.model_tester = ConvNextVaModelTester(self)
self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)
def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def create_and_test_config_common_properties(self):
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def __lowercase ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def __lowercase ( self : Dict ):
'''simple docstring'''
pass
def test_training(self):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
config.return_dict = True
if model_class.__name__ in [
*get_values(MODEL_MAPPING_NAMES),
*get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
]:
continue
model = model_class(config)
model.to(torch_device)
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
loss = model(**inputs).loss
loss.backward()
def test_training_gradient_checkpointing(self):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
config.use_cache = False
config.return_dict = True
if (
model_class.__name__
in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
or not model_class.supports_gradient_checkpointing
):
continue
model = model_class(config)
model.to(torch_device)
model.gradient_checkpointing_enable()
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
loss = model(**inputs).loss
loss.backward()
def test_forward_signature(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_stages = self.model_tester.num_stages
self.assertEqual(len(hidden_states), expected_num_stages + 1)
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
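# Note on the expected_num_stages + 1 count above: hidden_states includes the stem
# (patch embedding) output in addition to one feature map per stage.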
def test_for_image_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ConvNextVaModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
@cached_property
def default_image_processor(self):
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def test_inference_image_classification_head(self):
model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(torch_device)
preprocessor = self.default_image_processor
image = prepare_img()
inputs = preprocessor(images=image, return_tensors="pt" ).to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , expected_shape )
expected_slice = torch.tensor([0.9996, 0.1966, -0.4386] ).to(torch_device)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) ) | 228 |
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
if attention_mask is None:
attention_mask = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
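# Illustration of the masking rule above (hypothetical values): with
# config.pad_token_id == 1 and input_ids == [[5, 6, 1]], np.where(input_ids != 1, 1, 0)
# yields [[1, 1, 0]] -- every non-pad position attends, every pad position is masked.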
class FlaxBlenderbotModelTester:
def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02, ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.initializer_range = initializer_range
def prepare_config_and_inputs(self):
input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64 )) , -1 )
decoder_input_ids = shift_tokens_right(input_ids , 1 , 2 )
config = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=True , )
inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids )
return config, inputs_dict
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def check_use_cache_forward(self, model_class_name, config, inputs_dict):
max_decoder_length = 20
model = model_class_name(config)
encoder_outputs = model.encode(inputs_dict["input_ids"] )
decoder_input_ids, decoder_attention_mask = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
decoder_position_ids = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
outputs_cache = model.decode(
decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
outputs_cache_next = model.decode(
decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
outputs = model.decode(decoder_input_ids , encoder_outputs )
diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
max_decoder_length = 20
model = model_class_name(config)
encoder_outputs = model.encode(inputs_dict["input_ids"] )
decoder_input_ids, decoder_attention_mask = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
decoder_attention_mask_cache = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
decoder_position_ids = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
outputs_cache = model.decode(
decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
outputs_cache_next = model.decode(
decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
vocab_size = 99
def _get_config_and_data(self):
input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.int64 , )
batch_size = input_ids.shape[0]
config = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def test_lm_forward(self):
config, input_ids, batch_size = self._get_config_and_data()
lm_model = FlaxBlenderbotForConditionalGeneration(config)
outputs = lm_model(input_ids=input_ids)
expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape , expected_shape )
def test_lm_uneven_forward(self):
config = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
lm_model = FlaxBlenderbotForConditionalGeneration(config)
context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.int64 )
summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.int64 )
outputs = lm_model(input_ids=context , decoder_input_ids=summary )
expected_shape = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape , expected_shape )
def test_shift_tokens_right(self):
input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.int64 )
shifted = shift_tokens_right(input_ids , 1 , 2 )
n_pad_before = np.equal(input_ids , 1 ).astype(np.float32 ).sum()
n_pad_after = np.equal(shifted , 1 ).astype(np.float32 ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(n_pad_after , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
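# Worked example for the assertions above, with pad_token_id=1 and
# decoder_start_token_id=2: the row [71, 82, 18, 33, 2, 1, 1] becomes
# [2, 71, 82, 18, 33, 2, 1] -- same shape, first column is the start token,
# and exactly one trailing pad is consumed, hence n_pad_before - 1.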
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
is_encoder_decoder = True
all_model_classes = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def setUp(self):
self.model_tester = FlaxBlenderbotModelTester(self)
def test_use_cache_forward(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )
def test_use_cache_forward_with_attn_mask(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
def test_encode(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
model = model_class(config)
@jax.jit
def encode_jitted(input_ids, attention_mask=None, **kwargs):
return model.encode(input_ids=input_ids , attention_mask=attention_mask )
with self.subTest("JIT Enabled" ):
jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
self.assertEqual(len(outputs ) , len(jitted_outputs ) )
for jitted_output, output in zip(jitted_outputs , outputs ):
self.assertEqual(jitted_output.shape , output.shape )
def test_decode(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
model = model_class(config)
encoder_outputs = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
prepared_inputs_dict = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
return model.decode(
decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )
with self.subTest("JIT Enabled" ):
jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
self.assertEqual(len(outputs ) , len(jitted_outputs ) )
for jitted_output, output in zip(jitted_outputs , outputs ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def test_model_from_pretrained(self):
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
input_ids = np.ones((1, 1) ) * model.config.eos_token_id
outputs = model(input_ids)
self.assertIsNotNone(outputs)
@unittest.skipUnless(jax_device != """cpu""" , """3B test too slow on CPU.""" )
@slow
def test_generation_from_short_input_same_as_parlai_3B(self):
FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B" , from_pt=True )
tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B" )
src_text = ["Sam"]
model_inputs = tokenizer(src_text , return_tensors="jax" )
generated_utterances = model.generate(**model_inputs , **FASTER_GEN_KWARGS )
tgt_text = "Sam is a great name. It means \"sun\" in Gaelic."
generated_txt = tokenizer.batch_decode(generated_utterances , **TOK_DECODE_KW )
assert generated_txt[0].strip() == tgt_text
| 169 | 0 |
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
def __init__(self, parent, batch_size=13, image_size=10, num_channels=3, patch_size=2, tubelet_size=2, num_frames=2, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, mask_ratio=0.9, scope=None, ):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.patch_size = patch_size
self.tubelet_size = tubelet_size
self.num_frames = num_frames
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.mask_ratio = mask_ratio
self.scope = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
self.num_patches_per_frame = (image_size // patch_size) ** 2
self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
self.num_masks = int(mask_ratio * self.seq_length )
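# Worked example of the token bookkeeping above, using this tester's defaults:
# (10 // 2) ** 2 = 25 patches per frame, (2 // 2) * 25 = 25 tokens in total,
# and int(0.9 * 25) = 22 masked tokens per video.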
def prepare_config_and_inputs(self):
pixel_values = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
def create_and_check_model(self, config, pixel_values, labels):
model = VideoMAEModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_for_pretraining(self, config, pixel_values, labels):
model = VideoMAEForPreTraining(config)
model.to(torch_device)
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
mask = torch.ones((self.num_masks,) )
mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
bool_masked_pos = mask.expand(self.batch_size , -1 ).bool()
result = model(pixel_values , bool_masked_pos )
# model only returns predictions for masked patches
num_masked_patches = mask.sum().item()
decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
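# Sanity check of decoder_num_labels above with this tester's defaults:
# 3 channels * tubelet_size 2 * patch_size 2**2 = 24 values predicted per masked patch.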
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
pipeline_model_mapping = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
test_pruning = False
test_torchscript = False
test_resize_embeddings = False
test_head_masking = False
def setUp(self):
self.model_tester = VideoMAEModelTester(self)
self.config_tester = ConfigTester(self , config_class=VideoMAEConfig , has_text_modality=False , hidden_size=37 )
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = copy.deepcopy(inputs_dict)
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
mask = torch.ones((self.model_tester.num_masks,) )
mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
bool_masked_pos = mask.expand(self.model_tester.batch_size , -1 ).bool()
inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)
if return_labels:
if model_class in [
*get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
]:
inputs_dict["labels"] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=torch_device )
return inputs_dict
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="VideoMAE does not use inputs_embeds" )
def test_inputs_embeds(self):
pass
def test_model_common_attributes(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def test_forward_signature(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1] , expected_arg_names )
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_pretraining(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = VideoMAEModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_attention_outputs(self):
if not self.has_attentions:
pass
else:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
seq_len = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
attentions = outputs.attentions
self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
attentions = outputs.attentions
self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
self.assertEqual(out_len + 1 , len(outputs ) )
self_attentions = outputs.attentions
self.assertEqual(len(self_attentions ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
hidden_states = outputs.hidden_states
expected_num_layers = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(hidden_states ) , expected_num_layers )
num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict , config , model_class )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict , config , model_class )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def test_model_outputs_equivalence(self):
pass
def prepare_video():
file = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
video = np.load(file)
return list(video)
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
@cached_property
def default_image_processor(self):
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def test_inference_for_video_classification(self):
model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics" ).to(
torch_device )
image_processor = self.default_image_processor
video = prepare_video()
inputs = image_processor(video , return_tensors="pt" ).to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , expected_shape )
expected_slice = torch.tensor([0.3669, -0.0688, -0.2421] ).to(torch_device)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@slow
def test_inference_for_pretraining(self):
model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" ).to(torch_device)
image_processor = self.default_image_processor
video = prepare_video()
inputs = image_processor(video , return_tensors="pt" ).to(torch_device)
# add boolean mask, indicating which patches to mask
local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
inputs["bool_masked_pos"] = torch.load(local_path)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size([1, 1408, 1536] )
expected_slice = torch.tensor(
[[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] , device=torch_device )
self.assertEqual(outputs.logits.shape , expected_shape )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
expected_loss = torch.tensor([0.5142] , device=torch_device )
self.assertTrue(torch.allclose(outputs.loss , expected_loss , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" , norm_pix_loss=False ).to(
torch_device )
with torch.no_grad():
outputs = model(**inputs)
expected_loss = torch.tensor([0.6469] , device=torch_device )
self.assertTrue(torch.allclose(outputs.loss , expected_loss , atol=1e-4 ) )
| 118 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_time_series_transformer"] = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 118 | 1 |
"""simple docstring"""
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
manager = multiprocessing.Manager()
result = manager.list()
p = multiprocessing.Process(target=unsafe_execute , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append("timed out" )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
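# Minimal usage sketch (the program string here is hypothetical; real callers pass a
# problem's test harness concatenated with a model completion):
# res = check_correctness("assert 1 + 1 == 2", timeout=3.0, task_id="demo/0", completion_id=0)
# res["passed"] -> True, res["result"] -> "passed"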
def unsafe_execute(check_program, result, timeout):
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
rmtree = shutil.rmtree
rmdir = os.rmdir
chdir = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
exec_globals = {}
with swallow_io():
with time_limit(timeout):
exec(check_program , exec_globals )
result.append("passed" )
except TimeoutException:
result.append("timed out" )
except BaseException as e:
result.append(F"""failed: {e}""" )
# Needed for cleaning up.
shutil.rmtree = rmtree
os.rmdir = rmdir
os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
def signal_handler(signum, frame):
raise TimeoutException("Timed out!" )
signal.setitimer(signal.ITIMER_REAL , seconds )
signal.signal(signal.SIGALRM , signal_handler )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
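# Usage sketch: any body that outlives `seconds` is interrupted via SIGALRM.
# with time_limit(1.0):
#     while True:
#         pass  # raises TimeoutException("Timed out!")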
@contextlib.contextmanager
def swallow_io():
stream = WriteOnlyStringIO()
with contextlib.redirect_stdout(stream):
with contextlib.redirect_stderr(stream):
with redirect_stdin(stream):
yield
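# Usage sketch: inside the block, stdout/stderr writes are swallowed and any read
# from stdin raises OSError, so an executed program cannot block on input().
# with swallow_io():
#     print("never shown")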
@contextlib.contextmanager
def create_tempdir():
with tempfile.TemporaryDirectory() as dirname:
with chdir(dirname):
yield dirname
class TimeoutException(Exception):
pass
class WriteOnlyStringIO(io.StringIO):
"""StringIO that throws an exception when it's read from"""
def read(self, *args, **kwargs):
raise OSError
def readline(self, *args, **kwargs):
raise OSError
def readlines(self, *args, **kwargs):
raise OSError
def readable(self, *args, **kwargs):
"""Returns True if the IO object can be read."""
return False
class redirect_stdin(contextlib._RedirectStream):  # type: ignore
_stream = "stdin"
@contextlib.contextmanager
def chdir(root):
if root == ".":
yield
return
cwd = os.getcwd()
os.chdir(root)
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
"""
Disables destructive functions and prevents the generated code from interfering
with the test (e.g. fork bombs, killing other processes, removing filesystem
files). WARNING: this is not a security sandbox; untrusted model-generated code
should still be executed inside one.
"""
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
builtins.exit = None
builtins.quit = None
import os
os.environ["OMP_NUM_THREADS"] = "1"
os.kill = None
os.system = None
os.putenv = None
os.remove = None
os.removedirs = None
os.rmdir = None
os.fchdir = None
os.setuid = None
os.fork = None
os.forkpty = None
os.killpg = None
os.rename = None
os.renames = None
os.truncate = None
os.replace = None
os.unlink = None
os.fchmod = None
os.fchown = None
os.chmod = None
os.chown = None
os.chroot = None
os.fchdir = None
os.lchflags = None
os.lchmod = None
os.lchown = None
os.getcwd = None
os.chdir = None
import shutil
shutil.rmtree = None
shutil.move = None
shutil.chown = None
import subprocess
subprocess.Popen = None  # type: ignore
__builtins__["help"] = None
import sys
sys.modules["ipdb"] = None
sys.modules["joblib"] = None
sys.modules["resource"] = None
sys.modules["psutil"] = None
sys.modules["tkinter"] = None | 46 |
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
if a == 0:
raise ValueError("Coefficient 'a' must not be zero." )
delta = b * b - 4 * a * c
root_1 = (-b + sqrt(delta)) / (2 * a)
root_2 = (-b - sqrt(delta)) / (2 * a)
return (
root_1.real if not root_1.imag else root_1,
root_2.real if not root_2.imag else root_2,
)
def main() -> None:
solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
print(f'''The solutions are: {solution_1} and {solution_2}''' )
if __name__ == "__main__":
main()
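# Worked example for the solver above: for 5x^2 + 6x + 1 the discriminant is
# 6 * 6 - 4 * 5 * 1 = 16, so the roots are (-6 +/- 4) / 10, i.e. -0.2 and -1.0,
# which is exactly what main() prints.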
| 572 | 0 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=12, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, summary_type="last", use_proj=None, scope=None, ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_lengths = use_input_lengths
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.gelu_activation = gelu_activation
self.sinusoidal_embeddings = sinusoidal_embeddings
self.causal = causal
self.asm = asm
self.n_langs = n_langs
self.vocab_size = vocab_size
self.n_special = n_special
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.summary_type = summary_type
self.use_proj = use_proj
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = random_attention_mask([self.batch_size, self.seq_length])
input_lengths = None
if self.use_input_lengths:
input_lengths = (
ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
)  # small variation of seq_length
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
sequence_labels = None
token_labels = None
is_impossible_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
is_impossible_labels = ids_tensor([self.batch_size], 2).float()
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def get_config(self):
return FlaubertConfig(
vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, )
def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
model = FlaubertModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
result = model(input_ids, langs=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
model = FlaubertWithLMHeadModel(config)
model.to(torch_device)
model.eval()
result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def create_and_check_flaubert_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
model = FlaubertForQuestionAnsweringSimple(config)
model.to(torch_device)
model.eval()
result = model(input_ids)
result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
model = FlaubertForQuestionAnswering(config)
model.to(torch_device)
model.eval()
result = model(input_ids)
result_with_labels = model(
input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask, )
result_with_labels = model(
input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, )
(total_loss,) = result_with_labels.to_tuple()
result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
(total_loss,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape, () )
self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,) )
def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
model = FlaubertForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids)
result = model(input_ids, labels=sequence_labels)
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def create_and_check_flaubert_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
config.num_labels = self.num_labels
model = FlaubertForTokenClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def create_and_check_flaubert_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
config.num_choices = self.num_choices
model = FlaubertForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
result = model(
multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"""feature-extraction""": FlaubertModel,
"""fill-mask""": FlaubertWithLMHeadModel,
"""question-answering""": FlaubertForQuestionAnsweringSimple,
"""text-classification""": FlaubertForSequenceClassification,
"""token-classification""": FlaubertForTokenClassification,
"""zero-shot""": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
inputs_dict["start_positions"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device )
inputs_dict["end_positions"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device )
return inputs_dict
def setUp(self):
self.model_tester = FlaubertModelTester(self)
self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37 )
def test_config(self):
self.config_tester.run_common_tests()
def test_flaubert_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*config_and_inputs)
def test_flaubert_lm_head(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)
def test_flaubert_simple_qa(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)
def test_flaubert_qa(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)
def test_flaubert_sequence_classif(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)
def test_flaubert_token_classif(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)
def test_flaubert_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = FlaubertModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@slow
@require_torch_gpu
def test_torchscript_device_change(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
config.torchscript = True
model = model_class(config=config)
inputs_dict = self._prepare_for_class(inputs_dict, model_class)
traced_model = torch.jit.trace(
model, (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(traced_model, os.path.join(tmp, 'traced_model.pt' ) )
loaded = torch.jit.load(os.path.join(tmp, 'traced_model.pt' ), map_location=torch_device )
loaded(inputs_dict['input_ids'].to(torch_device ), inputs_dict['attention_mask'].to(torch_device ) )
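# The trace above is deliberately taken on CPU tensors and then reloaded with
# map_location=torch_device, which is what exercises the device-change path
# this test targets.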
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_no_head_absolute_embedding(self):
model = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased' )
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
output = model(input_ids)[0]
expected_shape = torch.Size((1, 11, 768) )
self.assertEqual(output.shape, expected_shape )
expected_slice = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4 ) )
| 243 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
    required_optional_params = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
    test_xformers_attention_forwardGenerator_pass = False
@property
def lowercase_ ( self : Optional[Any] ) ->Optional[Any]:
return 3_2
@property
def lowercase_ ( self : int ) ->str:
return 3_2
@property
def lowercase_ ( self : Any ) ->List[str]:
return self.time_input_dim
@property
def lowercase_ ( self : Optional[Any] ) ->str:
return self.time_input_dim * 4
@property
def lowercase_ ( self : Tuple ) ->int:
return 1_0_0
@property
def lowercase_ ( self : str ) ->Dict:
snake_case__ : Union[str, Any] = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def lowercase_ ( self : Any ) ->Optional[int]:
torch.manual_seed(0 )
snake_case__ : str = MCLIPConfig(
numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=3_7, num_attention_heads=4, num_hidden_layers=5, vocab_size=1_0_0_5, )
snake_case__ : Optional[Any] = MultilingualCLIP(_snake_case )
snake_case__ : List[Any] = text_encoder.eval()
return text_encoder
@property
def lowercase_ ( self : Tuple ) ->Optional[int]:
torch.manual_seed(0 )
snake_case__ : Optional[Any] = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
snake_case__ : Dict = UNetaDConditionModel(**_snake_case )
return model
@property
def lowercase_ ( self : Dict ) ->Optional[int]:
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase_ ( self : Union[str, Any] ) ->List[Any]:
torch.manual_seed(0 )
snake_case__ : Optional[int] = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase_ ( self : Any ) ->Any:
snake_case__ : int = self.dummy_text_encoder
snake_case__ : str = self.dummy_tokenizer
snake_case__ : Any = self.dummy_unet
snake_case__ : Tuple = self.dummy_movq
snake_case__ : int = DDIMScheduler(
num_train_timesteps=1_0_0_0, beta_schedule='linear', beta_start=0.0_0_0_8_5, beta_end=0.0_1_2, clip_sample=_snake_case, set_alpha_to_one=_snake_case, steps_offset=1, prediction_type='epsilon', thresholding=_snake_case, )
snake_case__ : Optional[int] = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def lowercase_ ( self : str, _snake_case : Any, _snake_case : int=0 ) ->str:
snake_case__ : Union[str, Any] = floats_tensor((1, self.cross_attention_dim), rng=random.Random(_snake_case ) ).to(_snake_case )
snake_case__ : str = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1 ) ).to(_snake_case )
# create init_image
snake_case__ : Tuple = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(_snake_case ) ).to(_snake_case )
snake_case__ : Optional[Any] = image.cpu().permute(0, 2, 3, 1 )[0]
snake_case__ : Tuple = Image.fromarray(np.uinta(_snake_case ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
# create mask
snake_case__ : Any = np.ones((6_4, 6_4), dtype=np.floataa )
snake_case__ : Optional[Any] = 0
if str(_snake_case ).startswith('mps' ):
snake_case__ : Union[str, Any] = torch.manual_seed(_snake_case )
else:
snake_case__ : Any = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
snake_case__ : int = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def lowercase_ ( self : Optional[int] ) ->Optional[Any]:
snake_case__ : int = 'cpu'
snake_case__ : str = self.get_dummy_components()
snake_case__ : Any = self.pipeline_class(**_snake_case )
snake_case__ : Optional[Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
snake_case__ : Tuple = pipe(**self.get_dummy_inputs(_snake_case ) )
snake_case__ : List[Any] = output.images
snake_case__ : List[Any] = pipe(
**self.get_dummy_inputs(_snake_case ), return_dict=_snake_case, )[0]
snake_case__ : Optional[int] = image[0, -3:, -3:, -1]
snake_case__ : int = image_from_tuple[0, -3:, -3:, -1]
print(F'''image.shape {image.shape}''' )
assert image.shape == (1, 6_4, 6_4, 3)
snake_case__ : Any = np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def lowercase_ ( self : Any ) ->List[Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : Dict ) ->int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : Optional[int] ) ->List[str]:
snake_case__ : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )
snake_case__ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
snake_case__ : Union[str, Any] = np.ones((7_6_8, 7_6_8), dtype=np.floataa )
snake_case__ : str = 0
snake_case__ : List[str] = 'a hat'
snake_case__ : Any = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior', torch_dtype=torch.floataa )
pipe_prior.to(_snake_case )
snake_case__ : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint', torch_dtype=torch.floataa )
snake_case__ : Tuple = pipeline.to(_snake_case )
pipeline.set_progress_bar_config(disable=_snake_case )
snake_case__ : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
snake_case__ , snake_case__ : Tuple = pipe_prior(
_snake_case, generator=_snake_case, num_inference_steps=5, negative_prompt='', ).to_tuple()
snake_case__ : Optional[Any] = pipeline(
_snake_case, image=_snake_case, mask_image=_snake_case, image_embeds=_snake_case, negative_image_embeds=_snake_case, generator=_snake_case, num_inference_steps=1_0_0, height=7_6_8, width=7_6_8, output_type='np', )
snake_case__ : Dict = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_snake_case, _snake_case )
| 243 | 1 |
"""simple docstring"""
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = 'Usage of script: script_name <size_of_canvas:int>'
choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2])
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(['w', 'k'])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
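

# Added smoke test (not part of the original script): a "blinker" is a
# period-2 oscillator, so two generations of `run` must reproduce the
# starting pattern.  Call _blinker_demo() manually to exercise the rules above.
def _blinker_demo() -> None:
    blinker = create_canvas(5)
    for j in (1, 2, 3):
        blinker[2][j] = True  # horizontal bar in the middle row
    once = run(blinker)
    assert once[1][2] and once[2][2] and once[3][2]  # flipped to a vertical bar
    assert run(once) == blinker  # back to the horizontal bar after two steps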
| 673 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}


class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=512 + 1, n_positions=32 * 32, n_embd=512, n_layer=24, n_head=8, n_inner=None, activation_function="quick_gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, tie_word_embeddings=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(self, preprocessor: "FeatureExtractionMixin", batch_size: int = 1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, num_channels: int = 3, image_width: int = 32, image_height: int = 32) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
| 539 | 0 |
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = 'config.json'
WEIGHTS_NAME = 'diffusion_pytorch_model.bin'
FLAX_WEIGHTS_NAME = 'diffusion_flax_model.msgpack'
ONNX_WEIGHTS_NAME = 'model.onnx'
SAFETENSORS_WEIGHTS_NAME = 'diffusion_pytorch_model.safetensors'
ONNX_EXTERNAL_WEIGHTS_NAME = 'weights.pb'
HUGGINGFACE_CO_RESOLVE_ENDPOINT = 'https://huggingface.co'
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = 'diffusers_modules'
HF_MODULES_CACHE = os.getenv('HF_MODULES_CACHE', os.path.join(hf_cache_home, 'modules'))
DEPRECATED_REVISION_ARGS = ['fp16', 'non-ema']
TEXT_ENCODER_ATTN_MODULE = '.self_attn'
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers | 94 | 1 |
from math import factorial
class Dual:
    """simple docstring"""

    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError('power must be a positive integer')
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError('differentiate() requires a function as input for func')
    if not isinstance(position, (float, int)):
        raise ValueError('differentiate() requires a float as input for position')
    if not isinstance(order, int):
        raise ValueError('differentiate() requires an int as input for order')
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()


def f(y):
    return y**2 * y**4


print(differentiate(f, 9, 2))
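
# Added sanity check (not in the original file): f(y) = y**6, so the second
# derivative is f''(y) = 30 * y**4 and f''(9) = 30 * 9**4 = 196830.  The
# arithmetic above is integer-exact here, so strict equality is safe.
assert differentiate(f, 9, 2) == 30 * 9**4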
| 253 |
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """simple docstring"""

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        padding = b'\x80' + b'\x00' * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack('>Q', 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        w = list(struct.unpack('>16L', block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ('{:08x}' * 5).format(*self.h)


def test_sha1_hash():
    msg = b'Test String'
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description='Process some strings or files')
    parser.add_argument(
        '--string', dest='input_string', default='Hello World!! Welcome to Cryptography', help='Hash the string', )
    parser.add_argument('--file', dest='input_file', help='Hash contents of a file')
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, 'rb') as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, 'utf-8')
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
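
    # Added cross-check (not in the original): hash another message and compare
    # with the standard library, mirroring what test_sha1_hash does above.
    demo_message = b'The quick brown fox jumps over the lazy dog'
    assert SHA1Hash(demo_message).final_hash() == hashlib.sha1(demo_message).hexdigest()  # noqa: S324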
| 253 | 1 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(self, df: pyspark.sql.DataFrame, split: Optional[NamedSplit] = None, features: Optional[Features] = None, streaming: bool = True, cache_dir: str = None, keep_in_memory: bool = False, working_dir: str = None, load_from_cache_file: bool = True, file_format: str = "arrow", **kwargs):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs)
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs)

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode, file_format=self._file_format)
        return self.builder.as_dataset(split=self.split)
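

# Added usage sketch (assumes a running local Spark session; `Dataset.from_spark`
# in the `datasets` library is the public entry point that wraps this reader):
#
#   from pyspark.sql import SparkSession
#   from datasets import Dataset
#
#   spark = SparkSession.builder.master("local[*]").getOrCreate()
#   df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
#   ds = Dataset.from_spark(df)                                   # materialized
#   streaming_ds = SparkDatasetReader(df, streaming=True).read()  # lazy iterable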
| 80 |
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the merged, sorted contents of the two arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input('Enter the elements of first array: ').split()]
    array_2 = [float(x) for x in input('Enter the elements of second array: ').split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
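
# Added worked example (not in the original): with an even combined length the
# median averages the two middle values; sorted([1, 3] + [2, 4]) = [1, 2, 3, 4],
# so the result is (2 + 3) / 2 = 2.5.
assert median_of_two_arrays([1.0, 3.0], [2.0, 4.0]) == 2.5
assert median_of_two_arrays([5.0], []) == 5.0  # odd combined length: middle value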
| 80 | 1 |
'''simple docstring'''
import os
import sys
import unittest
_SCREAMING_SNAKE_CASE = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_SCREAMING_SNAKE_CASE = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    """simple docstring"""

    def test_find_backend(self):
        simple_backend = find_backend('    if not is_torch_available():')
        self.assertEqual(simple_backend, 'torch')
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend('    if not (is_torch_available() and is_transformers_available()):')
        self.assertEqual(double_backend, 'torch_and_transformers')
        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            '    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):')
        self.assertEqual(triple_backend, 'torch_and_transformers_and_onnx')

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('torch', objects)
        self.assertIn('torch_and_transformers', objects)
        self.assertIn('flax_and_transformers', objects)
        self.assertIn('torch_and_transformers_and_onnx', objects)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn('UNet2DModel', objects['torch'])
        self.assertIn('FlaxUNet2DConditionModel', objects['flax'])
        self.assertIn('StableDiffusionPipeline', objects['torch_and_transformers'])
        self.assertIn('FlaxStableDiffusionPipeline', objects['flax_and_transformers'])
        self.assertIn('LMSDiscreteScheduler', objects['torch_and_scipy'])
        self.assertIn('OnnxStableDiffusionPipeline', objects['torch_and_transformers_and_onnx'])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object('CONSTANT', "'torch'")
        self.assertEqual(dummy_constant, '\nCONSTANT = None\n')

        dummy_function = create_dummy_object('function', "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n")

        expected_dummy_class = (
            "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args,"
            " **kwargs):\n        requires_backends(self, 'torch')\n\n    @classmethod\n    def from_config(cls,"
            " *args, **kwargs):\n        requires_backends(cls, 'torch')\n\n    @classmethod\n    def"
            " from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n"
        )
        dummy_class = create_dummy_object('FakeClass', "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = (
            '# This file is autogenerated by the command `make fix-copies`, do not'
            ' edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef'
            ' function(*args, **kwargs):\n    requires_backends(function, ["torch"])\n\n\nclass'
            ' FakeClass(metaclass=DummyObject):\n    _backends = ["torch"]\n\n    def __init__(self, *args,'
            ' **kwargs):\n        requires_backends(self, ["torch"])\n\n    @classmethod\n    def from_config(cls,'
            ' *args, **kwargs):\n        requires_backends(cls, ["torch"])\n\n    @classmethod\n    def'
            ' from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, ["torch"])\n'
        )
        dummy_files = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']})
        self.assertEqual(dummy_files['torch'], expected_dummy_pytorch_file)
| 366 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
'''simple docstring'''
def __init__( self : str , _lowercase : List[str] , _lowercase : Dict=13 , _lowercase : List[str]=7 , _lowercase : Union[str, Any]=True , _lowercase : int=True , _lowercase : int=True , _lowercase : Optional[Any]=True , _lowercase : List[Any]=99 , _lowercase : Optional[Any]=[1, 1, 2] , _lowercase : Optional[Any]=1 , _lowercase : List[str]=32 , _lowercase : Dict=4 , _lowercase : List[str]=8 , _lowercase : List[str]=37 , _lowercase : int="gelu_new" , _lowercase : Optional[int]=0.1 , _lowercase : List[str]=0.1 , _lowercase : Union[str, Any]=0.0 , _lowercase : str=512 , _lowercase : Optional[Any]=3 , _lowercase : str=0.02 , _lowercase : Union[str, Any]=3 , _lowercase : Optional[Any]=4 , _lowercase : Tuple=None , _lowercase : List[Any]=False , ) -> List[str]:
A_ = parent
A_ = batch_size
A_ = seq_length
A_ = is_training
A_ = use_input_mask
A_ = use_token_type_ids
A_ = use_labels
A_ = vocab_size
A_ = block_sizes
A_ = num_decoder_layers
A_ = d_model
A_ = n_head
A_ = d_head
A_ = d_inner
A_ = hidden_act
A_ = hidden_dropout
A_ = attention_dropout
A_ = activation_dropout
A_ = max_position_embeddings
A_ = type_vocab_size
A_ = 2
A_ = num_labels
A_ = num_choices
A_ = scope
A_ = initializer_std
# Used in the tests to check the size of the first attention layer
A_ = n_head
# Used in the tests to check the size of the first hidden state
A_ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
A_ = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
A_ = self.num_hidden_layers + 2
def __snake_case ( self : str) -> List[str]:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length])
A_ = None
if self.use_token_type_ids:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
A_ = None
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
A_ = ids_tensor([self.batch_size] , self.num_choices)
A_ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def __snake_case ( self : Tuple , _lowercase : Tuple , _lowercase : str , _lowercase : int , _lowercase : Dict , _lowercase : Optional[Any] , _lowercase : str , _lowercase : str , ) -> Dict:
A_ = TFFunnelModel(config=_lowercase)
A_ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
A_ = model(_lowercase)
A_ = [input_ids, input_mask]
A_ = model(_lowercase)
A_ = model(_lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
A_ = False
A_ = TFFunnelModel(config=_lowercase)
A_ = model(_lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
A_ = False
A_ = TFFunnelModel(config=_lowercase)
A_ = model(_lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
def __snake_case ( self : Union[str, Any] , _lowercase : int , _lowercase : Tuple , _lowercase : int , _lowercase : int , _lowercase : Tuple , _lowercase : List[str] , _lowercase : Optional[int] , ) -> Dict:
A_ = TFFunnelBaseModel(config=_lowercase)
A_ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
A_ = model(_lowercase)
A_ = [input_ids, input_mask]
A_ = model(_lowercase)
A_ = model(_lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
A_ = False
A_ = TFFunnelBaseModel(config=_lowercase)
A_ = model(_lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model))
A_ = False
A_ = TFFunnelBaseModel(config=_lowercase)
A_ = model(_lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
def __snake_case ( self : int , _lowercase : List[str] , _lowercase : int , _lowercase : List[str] , _lowercase : str , _lowercase : Any , _lowercase : Optional[Any] , _lowercase : Optional[Any] , ) -> List[Any]:
A_ = TFFunnelForPreTraining(config=_lowercase)
A_ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
A_ = model(_lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length))
def __snake_case ( self : List[str] , _lowercase : str , _lowercase : List[Any] , _lowercase : List[str] , _lowercase : List[Any] , _lowercase : Union[str, Any] , _lowercase : Optional[Any] , _lowercase : Tuple , ) -> Union[str, Any]:
A_ = TFFunnelForMaskedLM(config=_lowercase)
A_ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
A_ = model(_lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def __snake_case ( self : Union[str, Any] , _lowercase : Dict , _lowercase : List[Any] , _lowercase : Any , _lowercase : List[str] , _lowercase : int , _lowercase : Optional[int] , _lowercase : str , ) -> Dict:
A_ = self.num_labels
A_ = TFFunnelForSequenceClassification(config=_lowercase)
A_ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
A_ = model(_lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __snake_case ( self : Optional[int] , _lowercase : int , _lowercase : Optional[Any] , _lowercase : Tuple , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : str , ) -> List[str]:
A_ = self.num_choices
A_ = TFFunnelForMultipleChoice(config=_lowercase)
A_ = tf.tile(tf.expand_dims(_lowercase , 1) , (1, self.num_choices, 1))
A_ = tf.tile(tf.expand_dims(_lowercase , 1) , (1, self.num_choices, 1))
A_ = tf.tile(tf.expand_dims(_lowercase , 1) , (1, self.num_choices, 1))
A_ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
A_ = model(_lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def __snake_case ( self : Any , _lowercase : Union[str, Any] , _lowercase : Dict , _lowercase : List[Any] , _lowercase : List[str] , _lowercase : int , _lowercase : int , _lowercase : Dict , ) -> List[str]:
A_ = self.num_labels
A_ = TFFunnelForTokenClassification(config=_lowercase)
A_ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
A_ = model(_lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def __snake_case ( self : Optional[int] , _lowercase : str , _lowercase : Union[str, Any] , _lowercase : List[str] , _lowercase : List[Any] , _lowercase : List[str] , _lowercase : List[Any] , _lowercase : Union[str, Any] , ) -> Optional[int]:
A_ = TFFunnelForQuestionAnswering(config=_lowercase)
A_ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
A_ = model(_lowercase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def __snake_case ( self : str) -> Union[str, Any]:
A_ = self.prepare_config_and_inputs()
(
(
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) ,
) = config_and_inputs
A_ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": (TFFunnelBaseModel, TFFunnelModel),
"""fill-mask""": TFFunnelForMaskedLM,
"""question-answering""": TFFunnelForQuestionAnswering,
"""text-classification""": TFFunnelForSequenceClassification,
"""token-classification""": TFFunnelForTokenClassification,
"""zero-shot""": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
def __snake_case ( self : Optional[int]) -> Optional[Any]:
A_ = TFFunnelModelTester(self)
A_ = ConfigTester(self , config_class=_lowercase)
def __snake_case ( self : Any) -> Optional[Any]:
self.config_tester.run_common_tests()
def __snake_case ( self : Union[str, Any]) -> Union[str, Any]:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase)
def __snake_case ( self : List[Any]) -> str:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowercase)
def __snake_case ( self : Any) -> Optional[int]:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowercase)
def __snake_case ( self : List[str]) -> Tuple:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowercase)
def __snake_case ( self : int) -> Dict:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowercase)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
    test_head_masking = False
    test_onnx = False
def __snake_case ( self : str) -> Dict:
A_ = TFFunnelModelTester(self , base=_lowercase)
A_ = ConfigTester(self , config_class=_lowercase)
def __snake_case ( self : Optional[Any]) -> Tuple:
self.config_tester.run_common_tests()
def __snake_case ( self : List[str]) -> List[str]:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*_lowercase)
def __snake_case ( self : Dict) -> Dict:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowercase)
def __snake_case ( self : Union[str, Any]) -> int:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowercase)
| 366 | 1 |
"""simple docstring"""
import sys
def matrix_chain_order(array):
    """Fill the DP cost table and the split table for the given dimensions."""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    """Print the parenthesization recorded in the split table."""
    if i == j:
        print('A' + str(i), end=' ')
    else:
        print('(', end=' ')
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(')', end=' ')


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print('No. of Operation required: ' + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
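

# Added mini example (not in the original): for dimensions [10, 20, 5, 15]
# (A1 is 10x20, A2 is 20x5, A3 is 5x15) the optimal order is (A1 A2) A3,
# costing 10*20*5 + 10*5*15 = 1750 scalar multiplications.
def _chain_demo() -> None:
    dims = [10, 20, 5, 15]
    matrix, sol = matrix_chain_order(dims)
    assert matrix[1][len(dims) - 1] == 1750
    print_optimal_solution(sol, 1, len(dims) - 1)  # prints: ( ( A1 A2 ) A3 )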
| 702 |
"""simple docstring"""
def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustive recursion: maximum revenue obtainable from a rod of length n."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float('-inf')
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down dynamic programming) variant."""
    _enforce_args(n, prices)
    max_rev = [float('-inf') for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float('-inf')
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue, prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev), )
        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up dynamic programming) variant."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float('-inf') for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            'Each integral piece of rod must have a corresponding price. '
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
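

# Added comparison (not in the original): with the classic price table
# [1, 5, 8, 9] (a piece of length i sells for prices[i - 1]) the optimum for a
# rod of length 4 is 10, reached by cutting it into two pieces of length 2.
def _rod_demo() -> None:
    prices = [1, 5, 8, 9]
    assert naive_cut_rod_recursive(4, prices) == 10
    assert top_down_cut_rod(4, prices) == 10
    assert bottom_up_cut_rod(4, prices) == 10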
| 183 | 0 |
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print('Running benchmarks')
    setup = (
        'from __main__ import count_negatives_binary_search, '
        'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
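

# Added walk-through (not in the original) on the first test grid: the binary
# search scans each row with a right bound that only shrinks, because both the
# rows and the columns are sorted in decreasing order.
def _count_demo() -> None:
    sample = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
    validate_grid(sample)
    assert count_negatives_binary_search(sample) == 8
    assert count_negatives_brute_force(sample) == 8
    assert count_negatives_brute_force_with_break(sample) == 8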
| 658 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
__snake_case = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"""text-classification""",
"""language-modeling""",
"""summarization""",
"""token-classification""",
"""question-answering""",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument('-f')
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, 'r') as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
        with patch.object(sys, 'argv', testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
        with patch.object(sys, 'argv', testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
        with patch.object(sys, 'argv', testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split='test')
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
        with patch.object(sys, 'argv', testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
        with patch.object(sys, 'argv', testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
        with patch.object(sys, 'argv', testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
        with patch.object(sys, 'argv', testargs):
            run_qa.main()
            result = get_results(tmp_dir)
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
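        # All of the example tests above share one pattern: build the script's CLI
        # arguments as a list, patch sys.argv, and call the script's main() in-process.
        # A minimal sketch of that pattern (the script name here is illustrative, not
        # part of this test suite):
        #
        #     testargs = "run_example.py --output_dir /tmp/out --do_train".split()
        #     with patch.object(sys, "argv", testargs):
        #         run_example.main()  # main() re-parses the patched sys.argv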
| 658 | 1 |
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly( poly: Sequence[float] , x: float ) -> float:
    '''Evaluate the polynomial naively as the sum of c_i * x**i.'''
    return sum(c * (x**i) for i, c in enumerate(poly ) )


def horner( poly: Sequence[float] , x: float ) -> float:
    '''Evaluate the polynomial with Horner's rule in O(n) multiplications.'''
    result = 0.0
    for coeff in reversed(poly ):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
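    # Sanity check (an illustrative addition, not part of the original script):
    # Horner's rule is the factored form c0 + x*(c1 + x*(c2 + ...)), so both
    # evaluators must agree. For this poly and x = 10.0 the value is
    # 5.0*10**2 + 9.3*10**3 + 7.0*10**4 = 79800.0.
    import math

    assert math.isclose(evaluate_poly(poly, x), horner(poly, x))
    assert math.isclose(horner(poly, x), 79800.0)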
| 43 | '''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowercase__ : Optional[Any] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict( config , input_ids , decoder_input_ids=None , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id , 1 , 0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=32 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , initializer_range=0.02 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs( self ):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64 )) , -1 )
        decoder_input_ids = shift_tokens_right(input_ids , 1 , 2 )
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=False , )
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def prepare_config_and_inputs_for_common( self ):
        config , inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict["input_ids"] )
        decoder_input_ids , decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"Max diff is {diff}" )
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict["input_ids"] )
        decoder_input_ids , decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"Max diff is {diff}" )
@require_flax
class BlenderbotHeadTests( unittest.TestCase ):
    '''simple docstring'''
    vocab_size = 99

    def _get_config_and_data( self ):
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.int64 , )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
    def test_lm_forward( self ):
        config , input_ids , batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config )
        outputs = lm_model(input_ids=input_ids )
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape , expected_shape )
    def test_lm_uneven_forward( self ):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
        lm_model = FlaxBlenderbotForConditionalGeneration(config )
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.int64 )
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.int64 )
        outputs = lm_model(input_ids=context , decoder_input_ids=summary )
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape , expected_shape )
    def test_shift_tokens_right( self ):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.int64 )
        shifted = shift_tokens_right(input_ids , 1 , 2 )
        n_pad_before = np.equal(input_ids , 1 ).astype(np.float32 ).sum()
        n_pad_after = np.equal(shifted , 1 ).astype(np.float32 ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(n_pad_after , n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class FlaxBlenderbotModelTest( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
    '''simple docstring'''
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp( self ):
        self.model_tester = FlaxBlenderbotModelTester(self )
    def test_use_cache_forward( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )
    def test_use_cache_forward_with_attn_mask( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
    def test_encode( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )

                @jax.jit
                def encode_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask )

                with self.subTest("JIT Enabled" ):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def test_decode( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )

                with self.subTest("JIT Enabled" ):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
@slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill" )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1) ) * model.config.eos_token_id
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
    def test_generation_from_short_input_same_as_parlai_3B( self ):
        generate_kwargs = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        tokenizer_decode_kwargs = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B" , from_pt=True )
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B" )
        src_text = ["Sam"]
        model_inputs = tokenizer(src_text , return_tensors="jax" )
        generated_ids = model.generate(**model_inputs , **generate_kwargs )
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'
        generated_txt = tokenizer.batch_decode(generated_ids , **tokenizer_decode_kwargs )
assert generated_txt[0].strip() == tgt_text
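    # For reference, the shift_tokens_right used above builds decoder inputs by moving
    # every token one position to the right and writing the decoder start token into
    # column 0, which is exactly what test_shift_tokens_right checks (column 0 becomes
    # the start token, and one trailing pad token falls off the end). A rough numpy
    # sketch of the idea -- an illustration, not the library implementation:
    #
    #     def shift_tokens_right_sketch(input_ids, pad_token_id, decoder_start_token_id):
    #         shifted = np.full_like(input_ids, pad_token_id)
    #         shifted[:, 1:] = input_ids[:, :-1]
    #         shifted[:, 0] = decoder_start_token_id
    #         return shifted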
| 43 | 1 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__A : Dict = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
),
},
"spm_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
)
},
}
MAX_MODEL_INPUT_SIZES = {
"facebook/s2t-small-librispeech-asr": 1024,
}
MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer( PreTrainedTokenizer ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens = []
    def __init__( self , vocab_file , spm_file , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , do_upper_case=False , do_lower_case=False , tgt_lang=None , lang_codes=None , sp_model_kwargs=None , **kwargs , ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , do_upper_case=do_upper_case , do_lower_case=do_lower_case , tgt_lang=tgt_lang , lang_codes=lang_codes , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        self.encoder = load_json(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file , self.sp_model_kwargs )
        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [F'<lang:{lang}>' for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(F'<lang:{lang}>' ) for lang in self.langs}
            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang )
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size( self ) -> int:
        return len(self.encoder )

    @property
    def tgt_lang( self ) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang( self , new_tgt_lang ) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang )

    def set_tgt_lang_special_tokens( self , tgt_lang ) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize( self , text ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder[self.unk_token] )

    def _convert_id_to_token( self , index ) -> str:
        return self.decoder.get(index , self.unk_token )

    def convert_tokens_to_string( self , tokens ) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens )
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        decoded = self.sp_model.decode(current_sub_tokens )
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1=None , already_has_special_tokens=False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones

    def get_vocab( self ) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__( self , d ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )

    def save_vocabulary( self , save_directory , filename_prefix=None ) -> Tuple[str]:
        save_dir = Path(save_directory )
        assert save_dir.is_dir(), F'{save_directory} should be a directory'
        vocab_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
        )
        save_json(self.encoder , vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))
def load_spm( path: str , sp_model_kwargs: Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    '''simple docstring'''
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm


def load_json( path: str ):
    '''simple docstring'''
    with open(path , "r" ) as f:
        return json.load(f )


def save_json( data , path: str ) -> None:
    '''simple docstring'''
    with open(path , "w" ) as f:
        json.dump(data , f , indent=2 )
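# Usage sketch (illustrative; the file names here are assumptions): the tokenizer
# pairs a plain JSON token-to-id vocabulary with a sentencepiece model that performs
# the actual subword segmentation:
#
#     vocab = load_json("vocab.json")                       # e.g. {"<s>": 0, "<pad>": 1, ...}
#     spm = load_spm("sentencepiece.bpe.model", {})         # segmentation model
#     pieces = spm.encode("hello world", out_type=str)      # e.g. ["▁hello", "▁world"]
#     ids = [vocab.get(p, vocab["<unk>"]) for p in pieces]  # JSON vocab maps pieces to ids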
| 275 | import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )

    def get_rust_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts( self , tokenizer ):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
def lowercase_ ( self ):
__snake_case : str = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__snake_case : str = 'lower newer'
__snake_case : List[Any] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
__snake_case : str = tokenizer.tokenize(_UpperCAmelCase ) # , add_prefix_space=True)
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Optional[Any] = tokens + [tokenizer.unk_token]
__snake_case : str = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : int = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=_UpperCAmelCase ) , [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=_UpperCAmelCase ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
def lowercase_ ( self ):
__snake_case : Optional[int] = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
__snake_case : List[Any] = tokenizer.encode('sequence builders' , add_special_tokens=_UpperCAmelCase )
__snake_case : Optional[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=_UpperCAmelCase )
__snake_case : Optional[int] = tokenizer.encode(
'sequence builders' , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
__snake_case : Tuple = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
__snake_case : str = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
__snake_case : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowercase_ ( self ):
__snake_case : int = self.get_tokenizer()
__snake_case : str = 'Encode this sequence.'
__snake_case : Optional[int] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
__snake_case : Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
__snake_case : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : List[str] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
__snake_case : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
__snake_case : str = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
__snake_case : Any = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase )
# Testing spaces after special tokens
__snake_case : int = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase )} ) # mask token has a left space
__snake_case : Optional[Any] = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
__snake_case : Any = 'Encode <mask> sequence'
__snake_case : str = 'Encode <mask>sequence'
__snake_case : Union[str, Any] = tokenizer.encode(_UpperCAmelCase )
__snake_case : Optional[Any] = encoded.index(_UpperCAmelCase )
__snake_case : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : List[Any] = tokenizer.encode(_UpperCAmelCase )
__snake_case : List[Any] = encoded.index(_UpperCAmelCase )
__snake_case : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : int = self.tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : List[str] = 'A, <mask> AllenNLP sentence.'
__snake_case : Optional[Any] = tokenizer_r.encode_plus(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase )
__snake_case : List[Any] = tokenizer_p.encode_plus(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
__snake_case : Any = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
__snake_case : str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
_UpperCAmelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
_UpperCAmelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def lowercase_ ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__snake_case : str = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
__snake_case : List[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__snake_case : int = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , _UpperCAmelCase )
self.assertEqual(post_processor_state['add_prefix_space'] , _UpperCAmelCase )
self.assertEqual(post_processor_state['trim_offsets'] , _UpperCAmelCase )
def lowercase_ ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                text_of_1_token = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
                text = F"""{text_of_1_token} {text_of_1_token}"""
__snake_case : Optional[int] = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
__snake_case : List[str] = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCAmelCase ) + 1, len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
__snake_case : str = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
__snake_case : List[Any] = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCAmelCase ) + 1, len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
__snake_case : Optional[int] = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
__snake_case : Optional[Any] = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCAmelCase ), len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
__snake_case : Tuple = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
__snake_case : Any = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCAmelCase ), len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
                text = F""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__snake_case : Tuple = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
__snake_case : Optional[Any] = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCAmelCase ) + 1, 1 + len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
__snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
__snake_case : Any = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCAmelCase ), 1 + len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
__snake_case : Any = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
__snake_case : Tuple = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCAmelCase ), 1 + len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
| 576 | 0 |
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] =[
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs( graph , s , t , parent ):
    # Return True if there is a node that has not been iterated.
    visited = [False] * len(graph )
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut( graph , source , sink ):
    parent = [-1] * (len(graph ))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities (copy).
    while bfs(graph , source , sink , parent ):
        path_flow = float("Inf" )
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow , graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph ) ):
        for j in range(len(graph[0] ) ):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j) )
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
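    # An illustrative note (added for clarity): bfs/mincut above implement the
    # Edmonds-Karp max-flow algorithm and then read off the minimum cut: once no
    # augmenting path remains, every edge (i, j) whose residual capacity dropped to
    # zero but which had positive capacity in the original graph crosses the cut.
    # For the test graph above the expected cut edges are (1, 3), (4, 3) and (4, 5),
    # matching the max flow of 23. (mincut mutates its graph argument, so it should
    # not be called twice on the same matrix.)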
| 717 | """simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( a_ ):
"""simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images , text=None , add_special_tokens=True , padding=False , truncation=None , max_length=None , stride=0 , pad_to_multiple_of=None , return_token_type_ids=None , return_attention_mask=None , return_overflowing_tokens=False , return_special_tokens_mask=False , return_offsets_mapping=False , return_length=False , verbose=True , return_tensors=None , **kwargs , ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        encoding.update(encoding_image_processor )
        return encoding
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
| 558 | 0 |
def climb_stairs( number_of_steps: int ) -> int:
    assert (
        isinstance(number_of_steps , int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous , current = 1, 1
    for _ in range(number_of_steps - 1):
        current , previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
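    # Usage sketch (an illustrative addition): the recurrence is the Fibonacci one,
    # since step n is reached either from step n-1 or from step n-2.
    for steps, ways in [(1, 1), (2, 2), (3, 3), (4, 5), (5, 8)]:
        assert climb_stairs(steps) == ways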
| 181 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , do_convert_rgb=True , ):
        """simple docstring"""
        size = size if size is not None else {'height': 224, 'width': 224}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_image_processor_dict( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs( self , equal_resolution=False , numpify=False , torchify=False ):
        """simple docstring"""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size ):
                image_inputs.append(
                    np.random.randint(
                        255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uint8 ) )
        else:
            image_inputs = []
            for i in range(self.batch_size ):
                width , height = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
                image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uint8 ) )
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x ) for x in image_inputs]
        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp( self ):
        """simple docstring"""
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self , do_center_crop=True )
@property
    def image_processor_dict( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
        self.assertTrue(hasattr(image_processing , 'do_center_crop' ) )
        self.assertTrue(hasattr(image_processing , 'center_crop' ) )
        self.assertTrue(hasattr(image_processing , 'do_normalize' ) )
        self.assertTrue(hasattr(image_processing , 'image_mean' ) )
        self.assertTrue(hasattr(image_processing , 'image_std' ) )
        self.assertTrue(hasattr(image_processing , 'do_convert_rgb' ) )
def UpperCamelCase__ ( self :List[str]):
"""simple docstring"""
_lowercase =self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {'height': 224, 'width': 224})
self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})
_lowercase =self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
self.assertEqual(image_processor.size, {'shortest_edge': 42})
self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})
def UpperCamelCase__ ( self :Tuple):
"""simple docstring"""
pass
def UpperCamelCase__ ( self :Any):
"""simple docstring"""
_lowercase =self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_lowercase =self.image_processor_tester.prepare_inputs(equal_resolution=snake_case)
for image in image_inputs:
self.assertIsInstance(snake_case, Image.Image)
# Test not batched input
_lowercase =image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
# Test batched
_lowercase =image_processing(snake_case, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
def UpperCamelCase__ ( self :List[str]):
"""simple docstring"""
_lowercase =self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_lowercase =self.image_processor_tester.prepare_inputs(equal_resolution=snake_case, numpify=snake_case)
for image in image_inputs:
self.assertIsInstance(snake_case, np.ndarray)
# Test not batched input
_lowercase =image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
# Test batched
_lowercase =image_processing(snake_case, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
def UpperCamelCase__ ( self :Optional[int]):
"""simple docstring"""
_lowercase =self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_lowercase =self.image_processor_tester.prepare_inputs(equal_resolution=snake_case, torchify=snake_case)
for image in image_inputs:
self.assertIsInstance(snake_case, torch.Tensor)
# Test not batched input
_lowercase =image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
# Test batched
_lowercase =image_processing(snake_case, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp( self ):
        """simple docstring"""
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=True )
        self.expected_encoded_image_num_channels = 3
@property
    def image_processor_dict( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self :Tuple):
"""simple docstring"""
_lowercase =self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(snake_case, 'do_resize'))
self.assertTrue(hasattr(snake_case, 'size'))
self.assertTrue(hasattr(snake_case, 'do_center_crop'))
self.assertTrue(hasattr(snake_case, 'center_crop'))
self.assertTrue(hasattr(snake_case, 'do_normalize'))
self.assertTrue(hasattr(snake_case, 'image_mean'))
self.assertTrue(hasattr(snake_case, 'image_std'))
self.assertTrue(hasattr(snake_case, 'do_convert_rgb'))
def UpperCamelCase__ ( self :Dict):
"""simple docstring"""
pass
def UpperCamelCase__ ( self :str):
"""simple docstring"""
_lowercase =self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_lowercase =self.image_processor_tester.prepare_inputs(equal_resolution=snake_case)
for image in image_inputs:
self.assertIsInstance(snake_case, Image.Image)
# Test not batched input
_lowercase =image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
# Test batched
_lowercase =image_processing(snake_case, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
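# The shape assertions above all reduce to the same preprocessing contract: resize to
# `size`, center-crop to `crop_size`, then normalize per channel with `image_mean` and
# `image_std`. A rough numpy sketch of the normalization step (an illustration, not
# the library implementation):
#
#     def normalize_sketch(pixels, mean, std):  # pixels: (C, H, W) floats in [0, 1]
#         mean = np.array(mean)[:, None, None]  # broadcast one value per channel
#         std = np.array(std)[:, None, None]
#         return (pixels - mean) / std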
| 181 | 1 |
"""simple docstring"""
from __future__ import annotations
def maximum_non_adjacent_sum( nums: list[int] ) -> int:
    '''Return the maximum sum over subsets of ``nums`` with no two adjacent elements.'''
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
if __name__ == "__main__":
import doctest
doctest.testmod()
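    # Usage sketch (an illustrative addition): for [3, 2, 7, 10] the best
    # non-adjacent picks are 3 + 10 = 13.
    assert maximum_non_adjacent_sum([3, 2, 7, 10]) == 13
    assert maximum_non_adjacent_sum([]) == 0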
| 194 |
"""simple docstring"""
from __future__ import annotations
def fractional_knapsack( value: list[float] , weight: list[float] , capacity: float ):
    '''Greedy fractional knapsack: take items in decreasing value/weight ratio.'''
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i : ratio[i] , reverse=True )
    max_value: float = 0
    fractions: list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
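    # Usage sketch (an illustrative addition): with values [60, 100, 120], weights
    # [10, 20, 30] and capacity 50, the greedy takes items 0 and 1 whole plus 2/3 of
    # item 2, for a total value of 240.
    max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    assert max_value == 240.0
    assert fractions == [1, 1, 20 / 30]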
| 194 | 1 |
'''simple docstring'''
def get_demo_graph( index ):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges( graph ):
    id_ = 0
    n = len(graph )  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at , parent , bridges , id_ ):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to , at , bridges , id_ )
                low[at] = min(low[at] , low[to] )
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at) )
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at] , low[to] )

    bridges = []
    for i in range(n ):
        if not visited[i]:
            dfs(i , -1 , bridges , id_ )
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
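    # Usage sketch (an illustrative addition): on the first demo graph the bridges
    # are exactly the edges whose removal disconnects the graph.
    assert sorted(compute_bridges(get_demo_graph(0))) == [(2, 3), (2, 5), (3, 4)]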
| 42 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key( orig_key: str ):
    if "model" in orig_key:
        orig_key = orig_key.replace("""model.""" , """""" )
    if "norm1" in orig_key:
        orig_key = orig_key.replace("""norm1""" , """attention.output.LayerNorm""" )
    if "norm2" in orig_key:
        orig_key = orig_key.replace("""norm2""" , """output.LayerNorm""" )
    if "norm" in orig_key:
        orig_key = orig_key.replace("""norm""" , """LayerNorm""" )
    if "transformer" in orig_key:
        layer_num = orig_key.split(""".""" )[0].split("""_""" )[-1]
        orig_key = orig_key.replace(F'''transformer_{layer_num}''' , F'''encoder.layer.{layer_num}''' )
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("""mha.attn""" , """attention.self""" )
    if "mha" in orig_key:
        orig_key = orig_key.replace("""mha""" , """attention""" )
    if "W_q" in orig_key:
        orig_key = orig_key.replace("""W_q""" , """self.query""" )
    if "W_k" in orig_key:
        orig_key = orig_key.replace("""W_k""" , """self.key""" )
    if "W_v" in orig_key:
        orig_key = orig_key.replace("""W_v""" , """self.value""" )
    if "ff1" in orig_key:
        orig_key = orig_key.replace("""ff1""" , """intermediate.dense""" )
    if "ff2" in orig_key:
        orig_key = orig_key.replace("""ff2""" , """output.dense""" )
    if "ff" in orig_key:
        orig_key = orig_key.replace("""ff""" , """output.dense""" )
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("""mlm.mlm_class""" , """cls.predictions.decoder""" )
    if "mlm" in orig_key:
        orig_key = orig_key.replace("""mlm""" , """cls.predictions.transform""" )
    if "cls" not in orig_key:
        orig_key = """yoso.""" + orig_key
    return orig_key
def convert_checkpoint_helper( max_position_embeddings , orig_state_dict ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key )] = val
    orig_state_dict["""cls.predictions.bias"""] = orig_state_dict["""cls.predictions.decoder.bias"""]
    orig_state_dict["""yoso.embeddings.position_ids"""] = torch.arange(max_position_embeddings ).expand((1, -1) ) + 2
    return orig_state_dict
def convert_yoso_checkpoint( checkpoint_path , yoso_config_file , pytorch_dump_path ):
    orig_state_dict = torch.load(checkpoint_path , map_location="""cpu""" )["""model_state_dict"""]
    config = YosoConfig.from_json_file(yoso_config_file )
    model = YosoForMaskedLM(config )
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings , orig_state_dict )
    print(model.load_state_dict(new_state_dict ) )
    model.eval()
    model.save_pretrained(pytorch_dump_path )
    print(F'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""", default=None, type=str, required=True, help="""Path to YOSO pytorch checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for YOSO model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
a__ = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
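    # Example of the key mapping performed by rename_key above (an illustrative
    # trace, not taken from a real checkpoint):
    #
    #     rename_key("model.transformer_0.mha.W_q.weight")
    #     # -> "yoso.encoder.layer.0.attention.self.query.weight"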
| 654 | 0 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce the amount of console output from TF
print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
print("Torch version:", None)
try:
import transformers
print("transformers version:", transformers.__version__)
except ImportError:
print("transformers version:", None)
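# A small generalization sketch (hypothetical helper, not part of the original
# script): the try/except ImportError pattern above can be factored out for any
# optional dependency.
def report_version(module_name):
    try:
        module = __import__(module_name)
        print(f"{module_name} version:", getattr(module, "__version__", "unknown"))
    except ImportError:
        print(f"{module_name} version:", None)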
| 451 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
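# What the _LazyModule indirection buys (illustrative comment, not runnable in
# isolation): importing the package stays cheap because heavy submodules are
# loaded only on first attribute access, e.g.
#   from transformers.models import git   # fast: nothing torch-heavy imported yet
#   _ = git.GitForCausalLM                # now modeling_git (and torch) actually load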
| 451 | 1 |
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCamelCase = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
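# Minimal shape illustration (synthetic array, assumed layouts): PyTorch conv
# kernels are (out_ch, in_ch, kH, kW) while Flax expects (kH, kW, in_ch, out_ch),
# which is exactly what the transpose(2, 3, 1, 0) above produces.
def _demo_conv_kernel_layout():
    import numpy as np

    pt_kernel = np.zeros((8, 3, 5, 5))             # OIHW, PyTorch layout
    flax_kernel = pt_kernel.transpose(2, 3, 1, 0)  # HWIO, Flax layout
    assert flax_kernel.shape == (5, 5, 3, 8)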
| 473 | """simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. "
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0  # background
        array[array > 0] = 1   # foreground
        return Image.fromarray((array * 255).astype(np.uint8))
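# Hypothetical usage sketch (requires downloading the checkpoint; the call chains
# encode -> forward -> decode through the PipelineTool API):
#   from PIL import Image
#   segmenter = ImageSegmentationTool()
#   mask = segmenter(Image.open("photo.jpg"), label="cat")  # returns a PIL mask image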
| 473 | 1 |
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
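# A vectorized sketch of the same nearest-neighbour mapping (illustrative;
# assumes an (H, W, 3) ndarray): precomputing the source indices replaces the
# per-pixel double loop in process() above.
def resize_nearest(img, dst_w, dst_h):
    src_h, src_w = img.shape[:2]
    ys = (np.arange(dst_h) * (src_h / dst_h)).astype(int)
    xs = (np.arange(dst_w) * (src_w / dst_w)).astype(int)
    return img[ys[:, None], xs[None, :]]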
| 451 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
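# Quick illustration (hypothetical YAML, not from a real config): dotted-key
# flattening turns nested mappings into a single namespace, which is what makes
# getattr(orig_config, "model.classification.name", ...) work further below.
#   {"model": {"classification": {"name": "mobilevit_v2"}}}
#     -> {"model.classification.name": "mobilevit_v2"}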
def get_mobilevitva_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")
        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys from the original state_dict
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
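# Example invocation (sketch; the script filename and all paths are placeholders):
#   python convert_mobilevitv2_to_pytorch.py \
#     --task imagenet1k_256 \
#     --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#     --orig_config_path ./mobilevitv2.yaml \
#     --pytorch_dump_folder_path ./mobilevitv2-1.0-hf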
| 451 | 1 |
'''simple docstring'''
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(F"Done. Video saved to disk as {file_name}.")
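# A more defensive variant (sketch; the third-party endpoint above can change its
# response shape at any time): fail with a clear message instead of a raw KeyError.
def download_video_checked(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    response = requests.get(base_url + url, timeout=30)
    response.raise_for_status()
    payload = response.json()
    try:
        video_url = payload[0]["urls"][0]["src"]
    except (KeyError, IndexError) as err:
        raise RuntimeError(f"Unexpected response shape: {payload!r}") from err
    return requests.get(video_url, timeout=30).content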
| 533 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 411 | 0 |
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
print(harmonic_series(nth_term))
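    # Sanity check (illustrative): each element is the formatted term, not a float.
    assert harmonic_series("4") == ["1", "1/2", "1/3", "1/4"]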
| 713 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt", image=prompt_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt", image=prompt_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
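# Sketch of the slice-check idiom used above (synthetic data, illustrative only):
# freeze a tiny corner of the output and compare within a loose tolerance instead
# of pinning every pixel of a 512x512 image.
def _demo_slice_check():
    fake_image = np.full((1, 512, 512, 3), 0.5)
    image_slice = fake_image[0, 253:256, 253:256, -1].flatten()
    expected = np.full(9, 0.5)
    assert np.abs(image_slice - expected).max() < 1e-1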
| 124 | 0 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)
    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)
    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")
    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)
    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")
    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})
    def test_ensure_valid_input(self):
        valid_input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, valid_input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(valid_input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, valid_input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")
    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
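# Minimal re-implementation sketch of the idea test_ensure_valid_input exercises
# above (illustrative, not the transformers API): ONNX export passes inputs
# positionally, so tokenizer outputs are reordered to match forward() and cut at
# the first argument the tokenizer cannot supply.
def reorder_inputs(forward_param_names, tokens):
    ordered_names, ordered_values = [], []
    for name in forward_param_names:
        if name not in tokens:
            break  # stop at the first parameter we cannot provide
        ordered_names.append(name)
        ordered_values.append(tokens[name])
    return ordered_names, tuple(ordered_values)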
| 177 |
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )
    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__A : Dict = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__A, model_name="albert-base-v2", revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e", )
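# Quick demo of the "▁" pieces asserted above (runnable sketch): SentencePiece
# marks a preceding space with U+2581, so detokenization is just join + replace.
def _demo_sentencepiece_detok():
    pieces = ["▁this", "▁is", "▁a", "▁test"]
    assert "".join(pieces).replace("▁", " ").strip() == "this is a test"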
| 177 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
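# Why these stubs exist (illustrative note): importing the pipeline name always
# succeeds even when note_seq is absent; a helpful ImportError listing the missing
# backends fires only when the class is actually instantiated or used, e.g.
#   pipe = SpectrogramDiffusionPipeline()  # -> error naming transformers/torch/note_seq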
| 705 | """simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def _tokenize(self, text):
        """Tokenize a string with jieba, then refine each chunk with WordPiece."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping padding and sequence-control ids."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)
    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
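# Greedy longest-match sketch (toy vocab, illustrative): the WordpieceTokenizer
# above always takes the longest prefix present in the vocab, then restarts from
# where it stopped.
def greedy_match(word, vocab):
    out, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start and word[start:end] not in vocab:
            end -= 1
        if end == start:  # nothing matched: emit unk, advance one character
            out.append("<unk>")
            start += 1
        else:
            out.append(word[start:end])
            start = end
    return out


assert greedy_match("unhappy", {"un", "happy"}) == ["un", "happy"]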
| 595 | 0 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
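# Hypothetical composition sketch (no weights involved): pairing the config with
# an explicit ResNet backbone config mirrors from_backbone_config above.
#   backbone_cfg = CONFIG_MAPPING["resnet"](out_features=["stage4"])
#   detr_cfg = DetrConfig(use_timm_backbone=False, backbone_config=backbone_cfg)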
| 41 | import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
def lowercase_ (self : Optional[Any] , *__UpperCAmelCase : str , **__UpperCAmelCase : Tuple ) -> int:
"""simple docstring"""
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def lowercase_ (self : int , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : Union[str, Any] ) -> Any:
"""simple docstring"""
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def lowercase_ (self : Tuple ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = self.tokenizer.model_input_names
UpperCAmelCase__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowercase_ (self : Any ) -> Optional[int]:
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __UpperCAmelCase , )
return self.image_processor_class
@property
def lowercase_ (self : str ) -> Tuple:
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __UpperCAmelCase , )
return self.image_processor
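# --- added example (not part of the original file) ---------------------------
# The processor above routes `text` to the tokenizer and `images` to the image
# processor, then merges the two outputs into one batch. A dependency-free
# sketch of that merge (names are illustrative, not the real API):
def merge_modalities(text_encoding, image_features):
    merged = dict(text_encoding)   # token ids, attention mask, ...
    merged.update(image_features)  # pixel_values (and optional image masks)
    return merged

_batch = merge_modalities({'input_ids': [[101, 102]]}, {'pixel_values': [[0.0]]})
assert set(_batch) == {'input_ids', 'pixel_values'}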
| 486 | 0 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SCREAMING_SNAKE_CASE__ = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE__ = logging.getLogger()
def get_setup_file():  # name restored as an assumption; not referenced elsewhere in this file
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):  # name restored from the `get_results(...)` calls below
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
SCREAMING_SNAKE_CASE__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def _a ( self : int ):
"""simple docstring"""
A__ = self.get_auto_remove_tmp_dir()
A__ = F'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(_snake_case , 'argv' , _snake_case ):
run_flax_glue.main()
A__ = get_results(_snake_case )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
@slow
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = self.get_auto_remove_tmp_dir()
A__ = F'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(_snake_case , 'argv' , _snake_case ):
run_clm_flax.main()
A__ = get_results(_snake_case )
self.assertLess(result['eval_perplexity'] , 1_00 )
@slow
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = self.get_auto_remove_tmp_dir()
A__ = F'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
with patch.object(_snake_case , 'argv' , _snake_case ):
run_summarization_flax.main()
A__ = get_results(_snake_case , split='test' )
self.assertGreaterEqual(result['test_rouge1'] , 10 )
self.assertGreaterEqual(result['test_rouge2'] , 2 )
self.assertGreaterEqual(result['test_rougeL'] , 7 )
self.assertGreaterEqual(result['test_rougeLsum'] , 7 )
@slow
def _a ( self : List[str] ):
"""simple docstring"""
A__ = self.get_auto_remove_tmp_dir()
A__ = F'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
with patch.object(_snake_case , 'argv' , _snake_case ):
run_mlm_flax.main()
A__ = get_results(_snake_case )
self.assertLess(result['eval_perplexity'] , 42 )
@slow
def _a ( self : Dict ):
"""simple docstring"""
A__ = self.get_auto_remove_tmp_dir()
A__ = F'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(_snake_case , 'argv' , _snake_case ):
run_ta_mlm_flax.main()
A__ = get_results(_snake_case )
self.assertGreaterEqual(result['eval_accuracy'] , 0.42 )
@slow
def _a ( self : Tuple ):
"""simple docstring"""
A__ = 7 if get_gpu_count() > 1 else 2
A__ = self.get_auto_remove_tmp_dir()
A__ = F'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
with patch.object(_snake_case , 'argv' , _snake_case ):
run_flax_ner.main()
A__ = get_results(_snake_case )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
self.assertGreaterEqual(result['eval_f1'] , 0.3 )
@slow
def _a ( self : List[str] ):
"""simple docstring"""
A__ = self.get_auto_remove_tmp_dir()
A__ = F'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
with patch.object(_snake_case , 'argv' , _snake_case ):
run_qa.main()
A__ = get_results(_snake_case )
self.assertGreaterEqual(result['eval_f1'] , 30 )
self.assertGreaterEqual(result['eval_exact'] , 30 )
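# --- added example (not part of the original file) ---------------------------
# Every test above drives an example script the same way: patch sys.argv with
# the CLI flags, call the script's main(), then read the metrics file it wrote.
# A self-contained sketch of that argv-patching pattern (fake_main stands in
# for e.g. run_flax_glue.main):
import sys
from unittest.mock import patch

def fake_main():
    return sys.argv[1:]

with patch.object(sys, 'argv', ['prog', '--seed', '42']):
    assert fake_main() == ['--seed', '42']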
| 52 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
SCREAMING_SNAKE_CASE__ = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
SCREAMING_SNAKE_CASE__ = {
'''google/rembert''': 2_5_6,
}
SCREAMING_SNAKE_CASE__ = '''▁'''
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : Any = VOCAB_FILES_NAMES
A__ : str = PRETRAINED_VOCAB_FILES_MAP
A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : int = RemBertTokenizer
def __init__( self : Union[str, Any] , _snake_case : Any=None , _snake_case : Optional[Any]=None , _snake_case : Any=True , _snake_case : Optional[int]=True , _snake_case : Dict=False , _snake_case : Dict="[CLS]" , _snake_case : List[Any]="[SEP]" , _snake_case : Union[str, Any]="<unk>" , _snake_case : List[str]="[SEP]" , _snake_case : List[str]="<pad>" , _snake_case : str="[CLS]" , _snake_case : Any="[MASK]" , **_snake_case : Any , ):
"""simple docstring"""
A__ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token
super().__init__(
_snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , remove_space=_snake_case , keep_accents=_snake_case , bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , **_snake_case , )
A__ = do_lower_case
A__ = remove_space
A__ = keep_accents
A__ = vocab_file
A__ = False if not self.vocab_file else True
def _a ( self : Any , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ):
"""simple docstring"""
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _a ( self : Tuple , _snake_case : List[int] , _snake_case : Optional[List[int]] = None , _snake_case : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_snake_case )) + [1] + ([0] * len(_snake_case )) + [1]
return [1] + ([0] * len(_snake_case )) + [1]
def _a ( self : Dict , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ):
"""simple docstring"""
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _a ( self : Any , _snake_case : str , _snake_case : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(_snake_case ):
logger.error('Vocabulary path ({}) should be a directory'.format(_snake_case ) )
return
A__ = os.path.join(
_snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ):
copyfile(self.vocab_file , _snake_case )
return (out_vocab_file,)
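# --- added example (not part of the original file) ---------------------------
# build_inputs_with_special_tokens above produces the ALBERT/RemBERT layout
# [CLS] A [SEP] (B [SEP]). A standalone check with placeholder ids (101/102
# are assumptions, not the real RemBERT vocabulary):
def with_special_tokens(ids_a, ids_b=None, cls=101, sep=102):
    if ids_b is None:
        return [cls] + ids_a + [sep]
    return [cls] + ids_a + [sep] + ids_b + [sep]

assert with_special_tokens([7, 8], [9]) == [101, 7, 8, 102, 9, 102]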
| 52 | 1 |
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def snake_case__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''embed_dim''' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''num_heads''' ) )
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int=13 , lowerCAmelCase__ : Any=64 , lowerCAmelCase__ : str=3 , lowerCAmelCase__ : Tuple=[16, 48, 96] , lowerCAmelCase__ : Tuple=[1, 3, 6] , lowerCAmelCase__ : str=[1, 2, 10] , lowerCAmelCase__ : Tuple=[7, 3, 3] , lowerCAmelCase__ : Union[str, Any]=[4, 2, 2] , lowerCAmelCase__ : List[Any]=[2, 1, 1] , lowerCAmelCase__ : Union[str, Any]=[2, 2, 2] , lowerCAmelCase__ : int=[False, False, True] , lowerCAmelCase__ : str=[0.0, 0.0, 0.0] , lowerCAmelCase__ : Union[str, Any]=0.02 , lowerCAmelCase__ : int=1e-1_2 , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : str=True , lowerCAmelCase__ : Tuple=2 , ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = image_size
_UpperCamelCase = patch_sizes
_UpperCamelCase = patch_stride
_UpperCamelCase = patch_padding
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = num_labels
_UpperCamelCase = num_channels
_UpperCamelCase = embed_dim
_UpperCamelCase = num_heads
_UpperCamelCase = stride_kv
_UpperCamelCase = depth
_UpperCamelCase = cls_token
_UpperCamelCase = attention_drop_rate
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
def snake_case__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels )
_UpperCamelCase = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : str ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = CvtModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCamelCase = model(lowerCAmelCase__ )
_UpperCamelCase = (self.image_size, self.image_size)
_UpperCamelCase , _UpperCamelCase = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
_UpperCamelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
_UpperCamelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.num_labels
_UpperCamelCase = CvtForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCamelCase = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self : Tuple ) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
"""simple docstring"""
_snake_case : Tuple = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
_snake_case : str = (
{'feature-extraction': CvtModel, 'image-classification': CvtForImageClassification}
if is_torch_available()
else {}
)
_snake_case : Optional[Any] = False
_snake_case : int = False
_snake_case : Union[str, Any] = False
_snake_case : Tuple = False
_snake_case : List[Any] = False
def snake_case__ ( self : str ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = CvtModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 )
def snake_case__ ( self : str ) -> Any:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
return
@unittest.skip(reason='''Cvt does not output attentions''' )
def snake_case__ ( self : int ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
def snake_case__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
def snake_case__ ( self : Tuple ) -> int:
'''simple docstring'''
pass
def snake_case__ ( self : Dict ) -> str:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(lowerCAmelCase__ )
_UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def snake_case__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def snake_case__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str ):
_UpperCamelCase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
_UpperCamelCase = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
_UpperCamelCase = outputs.hidden_states
_UpperCamelCase = len(self.model_tester.depth )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : int ) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def snake_case__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
pass
@slow
def snake_case__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = CvtModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def a__ ( ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case__ ( self : Dict ) -> Tuple:
'''simple docstring'''
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def snake_case__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCAmelCase__ )
_UpperCamelCase = self.default_image_processor
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(images=lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
_UpperCamelCase = model(**lowerCAmelCase__ )
# verify the logits
_UpperCamelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
_UpperCamelCase = torch.tensor([0.9285, 0.9015, -0.3150] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
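# --- added example (not part of the original file) ---------------------------
# The floor(...) bookkeeping in create_and_check_model above is the standard
# convolution output-size formula. A standalone check against the tester's
# first stage (kernel 7, stride 4, padding 2 on a 64-pixel side -> 16):
from math import floor

def conv_out(size, kernel, stride, padding):
    return floor((size + 2 * padding - kernel) / stride + 1)

assert conv_out(64, 7, 4, 2) == 16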
| 98 |
import logging
from transformers import PretrainedConfig
_a = logging.getLogger(__name__)
_a = {
"bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = """bertabs"""
def __init__( self , __lowerCAmelCase=3_0_5_2_2 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=6 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=8 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=0.2 , __lowerCAmelCase=6 , __lowerCAmelCase=7_6_8 , __lowerCAmelCase=8 , __lowerCAmelCase=2_0_4_8 , __lowerCAmelCase=0.2 , **__lowerCAmelCase , ):
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
lowerCamelCase__ = vocab_size
lowerCamelCase__ = max_pos
lowerCamelCase__ = enc_layers
lowerCamelCase__ = enc_hidden_size
lowerCamelCase__ = enc_heads
lowerCamelCase__ = enc_ff_size
lowerCamelCase__ = enc_dropout
lowerCamelCase__ = dec_layers
lowerCamelCase__ = dec_hidden_size
lowerCamelCase__ = dec_heads
lowerCamelCase__ = dec_ff_size
lowerCamelCase__ = dec_dropout
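# --- added example (not part of the original file) ---------------------------
# The config above is just a named bag of encoder/decoder hyperparameters on
# top of PretrainedConfig. A dependency-free sketch of the same idea (class
# name and defaults here are illustrative):
class TinyConfig:
    def __init__(self, enc_layers=6, dec_layers=6, **kwargs):
        self.enc_layers = enc_layers
        self.dec_layers = dec_layers
        self.__dict__.update(kwargs)

    def to_dict(self):
        return dict(self.__dict__)

assert TinyConfig(enc_layers=4).to_dict()['enc_layers'] == 4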
| 481 | 0 |
import re
from filelock import FileLock
try:
import nltk
__snake_case = True
except (ImportError, ModuleNotFoundError):
__snake_case = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def _lowercase ( UpperCamelCase_ ) -> str:
'''simple docstring'''
UpperCamelCase_ = re.sub('<n>' , '' , UpperCamelCase_ )  # remove pegasus newline char; re.sub returns a new string, so reassign
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(UpperCamelCase_ ) )
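# --- added example (not part of the original file) ---------------------------
# Usage sketch for the splitter above (the function kept its obfuscated name
# `_lowercase`); guarded because it needs nltk's punkt models:
if NLTK_AVAILABLE:
    assert _lowercase('First one. Second one.') == 'First one.\nSecond one.'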
| 400 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__snake_case = logging.get_logger(__name__)
class lowercase__ ( _UpperCAmelCase ):
A__ : Dict =["""input_features""", """is_longer"""]
def __init__( self : Any , UpperCAmelCase_ : int=64 , UpperCAmelCase_ : List[str]=48000 , UpperCAmelCase_ : List[Any]=480 , UpperCAmelCase_ : Dict=10 , UpperCAmelCase_ : int=1024 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : float = 0 , UpperCAmelCase_ : float = 14000 , UpperCAmelCase_ : int = None , UpperCAmelCase_ : str = "fusion" , UpperCAmelCase_ : str = "repeatpad" , **UpperCAmelCase_ : List[Any] , ):
super().__init__(
feature_size=UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , padding_value=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , **UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE__ = top_db
SCREAMING_SNAKE_CASE__ = truncation
SCREAMING_SNAKE_CASE__ = padding
SCREAMING_SNAKE_CASE__ = fft_window_size
SCREAMING_SNAKE_CASE__ = (fft_window_size >> 1) + 1
SCREAMING_SNAKE_CASE__ = hop_length
SCREAMING_SNAKE_CASE__ = max_length_s
SCREAMING_SNAKE_CASE__ = max_length_s * sampling_rate
SCREAMING_SNAKE_CASE__ = sampling_rate
SCREAMING_SNAKE_CASE__ = frequency_min
SCREAMING_SNAKE_CASE__ = frequency_max
SCREAMING_SNAKE_CASE__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCAmelCase_ , min_frequency=UpperCAmelCase_ , max_frequency=UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , norm=UpperCAmelCase_ , mel_scale='htk' , )
SCREAMING_SNAKE_CASE__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCAmelCase_ , min_frequency=UpperCAmelCase_ , max_frequency=UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , norm='slaney' , mel_scale='slaney' , )
def A_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def A_ ( self : Union[str, Any] , UpperCAmelCase_ : np.array , UpperCAmelCase_ : Optional[np.array] = None ):
SCREAMING_SNAKE_CASE__ = spectrogram(
UpperCAmelCase_ , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCAmelCase_ , log_mel='dB' , )
return log_mel_spectrogram.T
def A_ ( self : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] ):
SCREAMING_SNAKE_CASE__ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE__ = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE__ = [0]
# randomly choose index for each part
SCREAMING_SNAKE_CASE__ = np.random.choice(ranges[0] )
SCREAMING_SNAKE_CASE__ = np.random.choice(ranges[1] )
SCREAMING_SNAKE_CASE__ = np.random.choice(ranges[2] )
SCREAMING_SNAKE_CASE__ = mel[idx_front : idx_front + chunk_frames, :]
SCREAMING_SNAKE_CASE__ = mel[idx_middle : idx_middle + chunk_frames, :]
SCREAMING_SNAKE_CASE__ = mel[idx_back : idx_back + chunk_frames, :]
SCREAMING_SNAKE_CASE__ = torch.tensor(mel[None, None, :] )
SCREAMING_SNAKE_CASE__ = torch.nn.functional.interpolate(
UpperCAmelCase_ , size=[chunk_frames, 64] , mode='bilinear' , align_corners=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = mel_shrink[0][0].numpy()
SCREAMING_SNAKE_CASE__ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def A_ ( self : Union[str, Any] , UpperCAmelCase_ : np.array , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str ):
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
SCREAMING_SNAKE_CASE__ = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
SCREAMING_SNAKE_CASE__ = len(UpperCAmelCase_ ) - max_length
SCREAMING_SNAKE_CASE__ = np.random.randint(0 , overflow + 1 )
SCREAMING_SNAKE_CASE__ = waveform[idx : idx + max_length]
SCREAMING_SNAKE_CASE__ = self._np_extract_fbank_features(UpperCAmelCase_ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
SCREAMING_SNAKE_CASE__ = self._np_extract_fbank_features(UpperCAmelCase_ , self.mel_filters )
SCREAMING_SNAKE_CASE__ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
SCREAMING_SNAKE_CASE__ = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
SCREAMING_SNAKE_CASE__ = np.stack([mel, mel, mel, mel] , axis=0 )
SCREAMING_SNAKE_CASE__ = False
else:
SCREAMING_SNAKE_CASE__ = self._random_mel_fusion(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = True
else:
raise NotImplementedError(F'data_truncating {truncation} not implemented' )
else:
SCREAMING_SNAKE_CASE__ = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
SCREAMING_SNAKE_CASE__ = int(max_length / len(UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE__ = np.stack(np.tile(UpperCAmelCase_ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
SCREAMING_SNAKE_CASE__ = int(max_length / len(UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE__ = np.stack(np.tile(UpperCAmelCase_ , UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE__ = np.pad(UpperCAmelCase_ , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
if truncation == "fusion":
SCREAMING_SNAKE_CASE__ = self._np_extract_fbank_features(UpperCAmelCase_ , self.mel_filters )
SCREAMING_SNAKE_CASE__ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
SCREAMING_SNAKE_CASE__ = self._np_extract_fbank_features(UpperCAmelCase_ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : List[str] , UpperCAmelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCAmelCase_ : str = None , UpperCAmelCase_ : Optional[str] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , **UpperCAmelCase_ : Optional[int] , ):
SCREAMING_SNAKE_CASE__ = truncation if truncation is not None else self.truncation
SCREAMING_SNAKE_CASE__ = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
F' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
SCREAMING_SNAKE_CASE__ = isinstance(UpperCAmelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
SCREAMING_SNAKE_CASE__ = is_batched_numpy or (
isinstance(UpperCAmelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE__ = [np.asarray(UpperCAmelCase_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCAmelCase_ , np.ndarray ):
SCREAMING_SNAKE_CASE__ = np.asarray(UpperCAmelCase_ , dtype=np.floataa )
elif isinstance(UpperCAmelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE__ = [np.asarray(UpperCAmelCase_ )]
# convert to mel spectrogram, truncate and pad if needed.
SCREAMING_SNAKE_CASE__ = [
self._get_input_mel(UpperCAmelCase_ , max_length if max_length else self.nb_max_samples , UpperCAmelCase_ , UpperCAmelCase_ )
for waveform in raw_speech
]
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
for mel, longer in padded_inputs:
input_mel.append(UpperCAmelCase_ )
is_longer.append(UpperCAmelCase_ )
if truncation == "fusion" and sum(UpperCAmelCase_ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
SCREAMING_SNAKE_CASE__ = np.random.randint(0 , len(UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE__ = True
if isinstance(input_mel[0] , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = [np.asarray(UpperCAmelCase_ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
SCREAMING_SNAKE_CASE__ = [[longer] for longer in is_longer]
SCREAMING_SNAKE_CASE__ = {'input_features': input_mel, 'is_longer': is_longer}
SCREAMING_SNAKE_CASE__ = BatchFeature(UpperCAmelCase_ )
if return_tensors is not None:
SCREAMING_SNAKE_CASE__ = input_features.convert_to_tensors(UpperCAmelCase_ )
return input_features
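# --- added example (not part of the original file) ---------------------------
# Dependency-free sketch of the "repeatpad" strategy used above: a clip that is
# shorter than max_length is tiled, then zero-padded for the remainder.
import numpy as np

def repeatpad(waveform, max_length):
    n_repeat = int(max_length / len(waveform))
    tiled = np.tile(waveform, n_repeat)
    return np.pad(tiled, (0, max_length - tiled.shape[0]), mode='constant')

assert repeatpad(np.ones(3), 10).tolist() == [1, 1, 1, 1, 1, 1, 1, 1, 1, 0]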
| 400 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase : List[str] , UpperCAmelCase : str , UpperCAmelCase : str ) -> Dict:
'''simple docstring'''
lowercase : Any =dataset
lowercase : Union[str, Any] =process
lowercase : str =params
def __len__( self : Any ) -> Dict:
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : Optional[int] , UpperCAmelCase : Optional[int] ) -> List[str]:
'''simple docstring'''
lowercase : Dict =self.dataset[i]
lowercase : Optional[Any] =self.process(UpperCAmelCase , **self.params )
return processed
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int]=None ) -> List[Any]:
'''simple docstring'''
lowercase : Dict =loader
lowercase : List[Any] =infer
lowercase : Tuple =params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
lowercase : Any =None
lowercase : int =loader_batch_size
# Internal bookkeeping
lowercase : Tuple =None
lowercase : Tuple =None
def __len__( self : List[Any] ) -> Dict:
'''simple docstring'''
return len(self.loader )
def __iter__( self : str ) -> List[str]:
'''simple docstring'''
lowercase : str =iter(self.loader )
return self
def A__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
lowercase : int =self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
lowercase : str ={}
for k, element in self._loader_batch_data.items():
if isinstance(UpperCAmelCase , UpperCAmelCase ):
# Convert ModelOutput to tuple first
lowercase : str =element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
lowercase : Any =tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
lowercase : Optional[int] =tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCAmelCase , UpperCAmelCase ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
lowercase : Optional[int] =tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
lowercase : Optional[Any] =tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
lowercase : Any =None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
lowercase : Optional[Any] =element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
lowercase : Any =np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
lowercase : List[Any] =element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
lowercase : Dict =self._loader_batch_data.__class__(UpperCAmelCase )
self._loader_batch_index += 1
return result
def A__ ( self : Tuple ) -> Dict:
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
lowercase : Any =next(self.iterator )
lowercase : Optional[int] =self.infer(UpperCAmelCase , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(UpperCAmelCase , torch.Tensor ):
lowercase : Optional[Any] =processed
else:
lowercase : Optional[int] =list(processed.keys() )[0]
lowercase : Any =processed[key]
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowercase : Optional[int] =len(UpperCAmelCase )
else:
lowercase : Dict =first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
lowercase : List[str] =observed_batch_size
# Setting internal index to unwrap the batch
lowercase : Dict =processed
lowercase : Any =0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
def __init__( self : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : str=None ) -> Optional[Any]:
'''simple docstring'''
super().__init__(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def __iter__( self : Optional[int] ) -> str:
'''simple docstring'''
lowercase : int =iter(self.loader )
lowercase : str =None
return self
def A__ ( self : Dict ) -> int:
'''simple docstring'''
if self.subiterator is None:
lowercase : Optional[Any] =self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
lowercase : int =next(self.subiterator )
except StopIteration:
# When a preprocess iterator ends, we can start lookig at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
lowercase : Optional[int] =self.infer(next(self.iterator ) , **self.params )
lowercase : List[str] =next(self.subiterator )
return processed
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
def __iter__( self : Optional[int] ) -> List[str]:
'''simple docstring'''
lowercase : str =iter(self.loader )
return self
def A__ ( self : int ) -> Tuple:
'''simple docstring'''
lowercase : Union[str, Any] =False
lowercase : List[str] =[]
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
lowercase : Dict =self.loader_batch_item()
lowercase : int =item.pop('''is_last''' )
accumulator.append(UpperCAmelCase )
if is_last:
return accumulator
while not is_last:
lowercase : List[Any] =self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(UpperCAmelCase , torch.Tensor ):
lowercase : Optional[int] =processed
else:
lowercase : int =list(processed.keys() )[0]
lowercase : int =processed[key]
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowercase : Union[str, Any] =len(UpperCAmelCase )
else:
lowercase : List[Any] =first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
lowercase : List[Any] =observed_batch_size
lowercase : Any =processed
lowercase : List[Any] =0
while self._loader_batch_index < self.loader_batch_size:
lowercase : Optional[int] =self.loader_batch_item()
lowercase : str =item.pop('''is_last''' )
accumulator.append(UpperCAmelCase )
if is_last:
return accumulator
else:
lowercase : int =processed
lowercase : int =item.pop('''is_last''' )
accumulator.append(UpperCAmelCase )
return accumulator
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase : Dataset , UpperCAmelCase : str ) -> Dict:
'''simple docstring'''
lowercase : Union[str, Any] =dataset
lowercase : Dict =key
def __len__( self : Dict ) -> Optional[Any]:
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : List[str] , UpperCAmelCase : Optional[int] ) -> Optional[int]:
'''simple docstring'''
return self.dataset[i][self.key]
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase : Dataset , UpperCAmelCase : str , UpperCAmelCase : str ) -> int:
'''simple docstring'''
lowercase : int =dataset
lowercase : Union[str, Any] =keya
lowercase : Any =keya
def __len__( self : Union[str, Any] ) -> Any:
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : Dict , UpperCAmelCase : Tuple ) -> int:
'''simple docstring'''
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 94 |
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = '''char'''
UpperCamelCase_ = '''bpe'''
UpperCamelCase_ = '''wp'''
SCREAMING_SNAKE_CASE = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = ['''image_processor''', '''char_tokenizer''']
UpperCamelCase_ = '''ViTImageProcessor'''
UpperCamelCase_ = '''MgpstrTokenizer'''
def __init__( self : int , UpperCAmelCase : Tuple=None , UpperCAmelCase : Dict=None , **UpperCAmelCase : Any ) -> List[str]:
'''simple docstring'''
lowercase : str =None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , UpperCAmelCase , )
lowercase : int =kwargs.pop('''feature_extractor''' )
lowercase : Optional[Any] =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
lowercase : Any =tokenizer
lowercase : Dict =AutoTokenizer.from_pretrained('''gpt2''' )
lowercase : Optional[int] =AutoTokenizer.from_pretrained('''bert-base-uncased''' )
super().__init__(UpperCAmelCase , UpperCAmelCase )
def __call__( self : List[Any] , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : List[str]=None , UpperCAmelCase : Dict=None , **UpperCAmelCase : Union[str, Any] ) -> int:
'''simple docstring'''
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
lowercase : str =self.image_processor(UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
if text is not None:
lowercase : Any =self.char_tokenizer(UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowercase : List[str] =encodings['''input_ids''']
return inputs
def A__ ( self : List[Any] , UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
char_preds , bpe_preds , wp_preds = sequences  # names restored: the lines below already reference them
batch_size = char_preds.size(0 )
char_strs , char_scores = self._decode_helper(char_preds , '''char''' )
bpe_strs , bpe_scores = self._decode_helper(bpe_preds , '''bpe''' )
wp_strs , wp_scores = self._decode_helper(wp_preds , '''wp''' )
final_strs = []
final_scores = []
for i in range(batch_size ):
scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
max_score_index = scores.index(max(scores ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
out = {}
# NOTE: the output keys below follow the upstream MgpstrProcessor convention (an assumption)
out['''generated_text'''] = final_strs
out['''scores'''] = final_scores
out['''char_preds'''] = char_strs
out['''bpe_preds'''] = bpe_strs
out['''wp_preds'''] = wp_strs
return out
def A__ ( self : str , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
if format == DecodeType.CHARACTER:
lowercase : Union[str, Any] =self.char_decode
lowercase : List[str] =1
lowercase : Tuple ='''[s]'''
elif format == DecodeType.BPE:
lowercase : Union[str, Any] =self.bpe_decode
lowercase : Optional[int] =2
lowercase : Optional[Any] ='''#'''
elif format == DecodeType.WORDPIECE:
lowercase : List[str] =self.wp_decode
lowercase : Union[str, Any] =102
lowercase : int ='''[SEP]'''
else:
raise ValueError(f'Format {format} is not supported.' )
lowercase , lowercase : Union[str, Any] =[], []
lowercase : Optional[int] =pred_logits.size(0 )
lowercase : List[str] =pred_logits.size(1 )
lowercase , lowercase : Optional[int] =pred_logits.topk(1 , dim=-1 , largest=UpperCAmelCase , sorted=UpperCAmelCase )
lowercase : Tuple =preds_index.view(-1 , UpperCAmelCase )[:, 1:]
lowercase : Any =decoder(UpperCAmelCase )
lowercase , lowercase : List[str] =torch.nn.functional.softmax(UpperCAmelCase , dim=2 ).max(dim=2 )
lowercase : List[Any] =preds_max_prob[:, 1:]
for index in range(UpperCAmelCase ):
lowercase : List[Any] =preds_str[index].find(UpperCAmelCase )
lowercase : Any =preds_str[index][:pred_eos]
lowercase : Any =preds_index[index].cpu().tolist()
lowercase : Optional[int] =pred_index.index(UpperCAmelCase ) if eos_token in pred_index else -1
lowercase : int =preds_max_prob[index][: pred_eos_index + 1]
lowercase : Tuple =pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(UpperCAmelCase )
conf_scores.append(UpperCAmelCase )
return dec_strs, conf_scores
def A__ ( self : List[str] , UpperCAmelCase : Optional[int] ) -> Dict:
'''simple docstring'''
lowercase : Union[str, Any] =[seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(UpperCAmelCase )]
return decode_strs
def A__ ( self : Optional[int] , UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(UpperCAmelCase )
def A__ ( self : Tuple , UpperCAmelCase : Dict ) -> List[str]:
'''simple docstring'''
lowercase : str =[seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(UpperCAmelCase )]
return decode_strs
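# --- added example (not part of the original file) ---------------------------
# batch_decode above keeps, per sample, the string from whichever decoding head
# (char / bpe / wordpiece) scored highest. A standalone version of that vote:
def pick_best(strs_by_head, scores_by_head):
    out = []
    for strs, scores in zip(zip(*strs_by_head), zip(*scores_by_head)):
        out.append(strs[scores.index(max(scores))])
    return out

assert pick_best([['cat'], ['cot'], ['cut']], [[0.2], [0.9], [0.5]]) == ['cot']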
| 94 | 1 |
"""simple docstring"""
import math
class Graph:  # class name restored: the demo below instantiates `Graph`
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):  # method/attribute names restored from the call sites below
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
print(graph.show_min(1, 4))
print(graph.show_min(0, 3))
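# --- added example (not part of the original file) ---------------------------
# Hand-checked cross-check of floyd_warshall on a smaller graph: the direct
# edge 0 -> 2 (weight 9) should be relaxed through vertex 1 (4 + 1 = 5).
_g = Graph(3)
_g.add_edge(0, 1, 4)
_g.add_edge(1, 2, 1)
_g.add_edge(0, 2, 9)
_g.floyd_warshall()
assert _g.show_min(0, 2) == 5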
| 349 |
"""simple docstring"""
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
# NOTE: identifiers in this test module were corrupted by preprocessing; they
# are restored below to match the upstream `datasets` csv test suite (an assumption).
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent("header1,header2\n1,2\n10,20\n")
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent("header1,header2\n1,2\n10,20,\n")
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(f"image\n{image_file}\n")
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent("label\ngood\nbad\ngood\n")
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent("int_list\n1 2 3\n4 5 6\n7 8 9\n")
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )
@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
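# --- added example (not part of the original file) ---------------------------
# The `converters` hook in the last test above is plain pandas plumbing: the
# callable receives each raw cell string. A standalone check of the converter:
_to_int_list = lambda x: [int(i) for i in x.split()]
assert _to_int_list('1 2 3') == [1, 2, 3]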
| 349 | 1 |
from __future__ import annotations
class Node:  # class name restored: the helpers below construct `Node` objects
    def __init__(self, data: int) -> None:
        self.data = data
        self.left = None
        self.right = None
def display(tree):  # In Order traversal of the tree (name restored from the recursive calls below)
'''simple docstring'''
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def depth_of_tree(tree):  # name restored from the recursive call below
'''simple docstring'''
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def is_full_binary_tree(tree):  # name restored from the recursive calls below
'''simple docstring'''
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def main():  # Main function for testing.
    # NOTE: the original node wiring was lost in preprocessing; this
    # reconstruction (an assumption) builds a full binary tree of depth 4.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.right.left = Node(6)
    tree.right.right = Node(7)
    tree.left.left.left = Node(8)
    tree.left.left.right = Node(9)
    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("""Tree is: """)
    display(tree)
if __name__ == "__main__":
main()
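# --- added example (not part of the original file) ---------------------------
# Counterexample for is_full_binary_tree above: one dangling child breaks the
# "every node has zero or two children" property.
_t = Node(1)
_t.left = Node(2)  # right child missing -> not full
assert not is_full_binary_tree(_t)
assert depth_of_tree(_t) == 2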
| 80 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class __UpperCamelCase ( _lowerCAmelCase ):
# to overwrite at feature extractactor specific tests
__snake_case :Optional[int] = None
__snake_case :Dict = None
@property
def _a ( self : str ) -> List[str]:
"""simple docstring"""
return self.feat_extract_tester.prepare_feat_extract_dict()
def _a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_lowerCAmelCase , """feature_size""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """sampling_rate""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """padding_value""" ) )
def _a ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = self.feat_extract_tester.prepare_inputs_for_common()
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_lowerCAmelCase ) == len(_lowerCAmelCase ) for x, y in zip(_lowerCAmelCase , processed_features[input_name] ) ) )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCAmelCase )
__lowercase = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
__lowercase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__lowercase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def _a ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCAmelCase )
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
__lowercase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__lowercase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def _a ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCAmelCase )
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} , tensor_type="""tf""" )
__lowercase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__lowercase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
    def _check_padding( self : str , _lowerCAmelCase : List[Any]=False ) -> None:
"""simple docstring"""
def _inputs_have_equal_length(_lowerCAmelCase : int ):
__lowercase = len(input[0] )
for input_slice in input[1:]:
if len(_lowerCAmelCase ) != length:
return False
return True
def _inputs_are_equal(_lowerCAmelCase : Dict , _lowerCAmelCase : Tuple ):
if len(_lowerCAmelCase ) != len(_lowerCAmelCase ):
return False
for input_slice_a, input_slice_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
if not np.allclose(np.asarray(_lowerCAmelCase ) , np.asarray(_lowerCAmelCase ) , atol=1e-3 ):
return False
return True
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common(numpify=_lowerCAmelCase )
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
__lowercase = self.feat_extract_tester.seq_length_diff
__lowercase = self.feat_extract_tester.max_seq_length + pad_diff
__lowercase = self.feat_extract_tester.min_seq_length
__lowercase = self.feat_extract_tester.batch_size
__lowercase = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
__lowercase = feat_extract.pad(_lowerCAmelCase , padding=_lowerCAmelCase )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[-1] ) )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , return_tensors="""np""" )
__lowercase = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_lowerCAmelCase ):
feat_extract.pad(_lowerCAmelCase , padding="""max_length""" )[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=_lowerCAmelCase , return_tensors="""np""" )
__lowercase = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(_inputs_are_equal(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
__lowercase = feat_extract.pad(_lowerCAmelCase , pad_to_multiple_of=10 )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , pad_to_multiple_of=10 )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , pad_to_multiple_of=10 , max_length=_lowerCAmelCase )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , pad_to_multiple_of=10 , max_length=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = input_a[input_name]
self.assertTrue(all(len(_lowerCAmelCase ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(_lowerCAmelCase , _lowerCAmelCase ) )
__lowercase = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(_lowerCAmelCase ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
__lowercase = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
    def _check_truncation( self : Tuple , _lowerCAmelCase : str=False ) -> None:
"""simple docstring"""
def _inputs_have_equal_length(_lowerCAmelCase : Tuple ):
__lowercase = len(input[0] )
for input_slice in input[1:]:
if len(_lowerCAmelCase ) != length:
return False
return True
def _inputs_are_equal(_lowerCAmelCase : Any , _lowerCAmelCase : str ):
if len(_lowerCAmelCase ) != len(_lowerCAmelCase ):
return False
for input_slice_a, input_slice_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
if not np.allclose(np.asarray(_lowerCAmelCase ) , np.asarray(_lowerCAmelCase ) , atol=1e-3 ):
return False
return True
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common(numpify=_lowerCAmelCase )
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , truncation=_lowerCAmelCase )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) )
__lowercase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
# truncate to smallest with np
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" , truncation=_lowerCAmelCase , )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" )
__lowercase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
# truncate to middle
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=_lowerCAmelCase )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[1] ) , return_tensors="""np""" )
__lowercase = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(_inputs_are_equal(_lowerCAmelCase , _lowerCAmelCase ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowerCAmelCase ):
feat_extract.pad(_lowerCAmelCase , truncation=_lowerCAmelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowerCAmelCase ):
feat_extract.pad(_lowerCAmelCase , padding="""longest""" , truncation=_lowerCAmelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowerCAmelCase ):
feat_extract.pad(_lowerCAmelCase , padding="""longest""" , truncation=_lowerCAmelCase )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_lowerCAmelCase ):
feat_extract.pad(_lowerCAmelCase , padding="""max_length""" , truncation=_lowerCAmelCase )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
__lowercase = 12
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_lowerCAmelCase , truncation=_lowerCAmelCase , )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_lowerCAmelCase , )
__lowercase = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
__lowercase = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
__lowercase = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
def _a ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
self._check_padding(numpify=_lowerCAmelCase )
def _a ( self : List[Any] ) -> Dict:
"""simple docstring"""
self._check_padding(numpify=_lowerCAmelCase )
def _a ( self : int ) -> Tuple:
"""simple docstring"""
self._check_truncation(numpify=_lowerCAmelCase )
def _a ( self : str ) -> str:
"""simple docstring"""
self._check_truncation(numpify=_lowerCAmelCase )
@require_torch
def _a ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common()
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , return_tensors="""np""" )[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , return_tensors="""pt""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
@require_tf
def _a ( self : Any ) -> Any:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common()
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , return_tensors="""np""" )[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , return_tensors="""tf""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def _a ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.feat_extract_dict
__lowercase = True
__lowercase = self.feature_extraction_class(**_lowerCAmelCase )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common()
__lowercase = [len(_lowerCAmelCase ) for x in speech_inputs]
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , return_tensors="""np""" )
self.assertIn("""attention_mask""" , _lowerCAmelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _lowerCAmelCase )
def _a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.feat_extract_dict
__lowercase = True
__lowercase = self.feature_extraction_class(**_lowerCAmelCase )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common()
__lowercase = [len(_lowerCAmelCase ) for x in speech_inputs]
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
__lowercase = min(_lowerCAmelCase )
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors="""np""" )
self.assertIn("""attention_mask""" , _lowerCAmelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 80 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class _a ( PreTrainedTokenizer ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self : Optional[Any] , vocab_file : str , bos_token : str="<s>" , eos_token : str="</s>" , sep_token : str="</s>" , cls_token : str="<s>" , unk_token : str="<unk>" , pad_token : str="<pad>" , mask_token : str="<mask>" , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , )->None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids['''<mask>'''] = len(self.sp_model ) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
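        # Worked example of the alignment above: spm gives '▁de' id 7, so its
        # fairseq/HF id is 7 + self.fairseq_offset == 8, while ids 0-3 come from
        # fairseq_tokens_to_ids directly and <mask> is appended after the spm vocab.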
    def __getstate__( self : str )->Dict:
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        state['''sp_model_proto'''] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self : List[Any] , d : Dict )->None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens( self : int , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None )->List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self : Any , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False )->List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self : Optional[int] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None )->List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
@property
    def vocab_size( self : Dict )->int:
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
    def get_vocab( self : Union[str, Any] )->Dict:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self : int , text : str )->List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self : Optional[int] , token : str )->int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
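    # Example of the unk fallback above: sp_model.PieceToId('<never-seen>') returns 0,
    # which is falsy, so the method returns self.unk_token_id instead of 0 + offset.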
    def _convert_id_to_token( self : Dict , index : int )->str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self : Dict , tokens : List[str] )->str:
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
    def save_vocabulary( self : str , save_directory : str , filename_prefix : Optional[str] = None )->Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 712 |
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
END_COMMON = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def rename_state_dict_key( k : str , patterns : list ):
    '''simple docstring'''
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name , hf_name )
    return k
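# Example with a hypothetical TF key (patterns are applied left to right):
#   rename_state_dict_key("pegasus/decoder/layer_0/intermediate/dense/kernel", DECODER_PATTERNS)
#   -> "model.decoder.layers.0.fc1.weight"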
def convert_bigbird_pegasus( tf_weights : dict , config_update : dict ):
    '''simple docstring'''
    cfg = BigBirdPegasusConfig(**config_update )
    torch_model = BigBirdPegasusForConditionalGeneration(cfg )
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
    for k, v in tqdm(decoder_weights.items() , '''tf -> hf conversion''' ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k , patterns )
        if new_k not in state_dict:
            raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
    for k, v in tqdm(remaining_weights.items() , '''tf -> hf conversion''' ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k , patterns )
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
    # Reconstructed (assumption, mirroring the shared pegasus converters): the single
    # TF positional table is reused for both encoder and decoder embed_positions.
    mapping['''model.encoder.embed_positions.weight'''] = mapping['''model.embed_positions.weight''']
    mapping['''model.decoder.embed_positions.weight'''] = mapping.pop('''model.embed_positions.weight''' )
    missing, extra = torch_model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            '''final_logits_bias''',
            '''model.encoder.embed_tokens.weight''',
            '''model.decoder.embed_tokens.weight''',
            '''lm_head.weight''',
        ]
    ]
    assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
    assert extra == [], f'no matches found for the following tf keys {extra}'
    return torch_model
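# Note on the transposes above: TF stores dense kernels as (in_features, out_features),
# while torch.nn.Linear expects (out_features, in_features), hence v.T for the
# dense/query/key/value tensors.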
def get_tf_weights_as_numpy( path : str ):
    '''simple docstring'''
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ['''global_step''']
    for name, shape in tqdm(init_vars , desc='''converting tf checkpoint to dict''' ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path , name )
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch( ckpt_path : str , save_dir : str , config_update : dict ):
    '''simple docstring'''
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    torch_model = convert_bigbird_pegasus(tf_weights , config_update )
    torch_model.save_pretrained(save_dir )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 95 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'xlm-roberta-base': 512,
'xlm-roberta-large': 512,
'xlm-roberta-large-finetuned-conll02-dutch': 512,
'xlm-roberta-large-finetuned-conll02-spanish': 512,
'xlm-roberta-large-finetuned-conll03-english': 512,
'xlm-roberta-large-finetuned-conll03-german': 512,
}
class snake_case_ ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self : List[Any] , vocab_file : str , bos_token : str="<s>" , eos_token : str="</s>" , sep_token : str="</s>" , cls_token : str="<s>" , unk_token : str="<unk>" , pad_token : str="<pad>" , mask_token : str="<mask>" , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , )->None:
        '''simple docstring'''
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids["""<mask>"""] = len(self.sp_model ) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self : int )->Dict:
        '''simple docstring'''
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        state["""sp_model_proto"""] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self : Union[str, Any] , d : Dict )->None:
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens( self : Tuple , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None )->List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self : List[str] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False )->List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self : Optional[Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None )->List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
@property
    def vocab_size( self : Tuple )->int:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
    def get_vocab( self : str )->Dict:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self : List[str] , text : str )->List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self : Tuple , token : str )->int:
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self : int , index : int )->str:
        '''simple docstring'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self : List[str] , tokens : List[str] )->str:
        '''simple docstring'''
        out_string = """""".join(tokens ).replace(SPIECE_UNDERLINE , """ """ ).strip()
        return out_string
    def save_vocabulary( self : List[Any] , save_directory : str , filename_prefix : Optional[str] = None )->Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 504 |
import numpy
class TwoHiddenLayerNeuralNetwork :
    def __init__( self : List[str] , input_array : numpy.ndarray , output_array : numpy.ndarray )->None:
        '''simple docstring'''
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1] , 4 )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
            4 , 3 )
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3 , 1 )
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape )
    def feedforward( self : int )->numpy.ndarray:
        '''simple docstring'''
        # layer_between_input_and_first_hidden_layer is the layer connecting the
        # input nodes with the first hidden set of nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
        return self.layer_between_second_hidden_layer_and_output
    def back_propagation( self : int )->None:
        '''simple docstring'''
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output ) , )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T , numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
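    # The three `updated_*` terms above are the chain-rule gradients of the squared
    # error with respect to each weight matrix; no learning rate is used, so the raw
    # gradients are added directly to the weights.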
    def train( self : Any , output : numpy.ndarray , iterations : int , give_loss : bool )->None:
        '''simple docstring'''
        for iteration in range(1 , iterations + 1 ):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward() ) )
                print(F'''Iteration {iteration} Loss: {loss}''' )
    def predict( self : Optional[int] , input_arr : numpy.ndarray )->int:
        '''simple docstring'''
        # Run a single forward pass on the given input row(s).
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def sigmoid( value : numpy.ndarray ) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value ))
def sigmoid_derivative( value : numpy.ndarray ) -> numpy.ndarray:
    return (value) * (1 - (value))
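# Note: sigmoid_derivative expects a value that is already sigmoid(x):
# for s = sigmoid(x), ds/dx = s * (1 - s), which is exactly (value) * (1 - (value)).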
def example( ) -> int:
    # Input values (each row is one three-bit sample); dtype reconstructed as float64.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ) , dtype=numpy.float64 , )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.float64 )
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input , output_array=output )
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration (False assumed here).
    neural_network.train(output=output , iterations=10 , give_loss=False )
    return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.float64 ) )
if __name__ == "__main__":
    example()
| 504 | 1 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling( data ):
    '''simple docstring'''
    return (data["data"], data["target"])
def xgboost( features , target , test_features ):
    '''simple docstring'''
    # Parameter names reconstructed from how fit/predict are used below.
    xgb = XGBRegressor(verbosity=0 , random_state=4_2 )
    xgb.fit(features , target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions
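# Usage sketch (hypothetical arrays): xgboost(x_train, y_train, x_test) returns an
# (n_samples, 1) array of predicted targets for x_test.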
def main( ):
    '''simple docstring'''
    # Load the California housing dataset and split it for training/evaluation.
    housing = fetch_california_housing()
    data, target = data_handling(housing )
    x_train, x_test, y_train, y_test = train_test_split(
        data , target , test_size=0.25 , random_state=1 )
    predictions = xgboost(x_train , y_train , x_test )
    # Error printing
    print(F'''Mean Absolute Error : {mean_absolute_error(y_test , predictions )}''' )
    print(F'''Mean Square Error : {mean_squared_error(y_test , predictions )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 721 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
    def __init__( self , num_of_nodes : int ) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges = []
        self.m_component = {}
    def add_edge( self , u_node : int , v_node : int , weight : int ) -> None:
        # Name reconstructed from the edge format used below; appends one weighted edge.
        self.m_edges.append([u_node, v_node, weight] )
    def find_component( self , u_node : int ) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node] )
    def set_component( self , u_node : int ) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k )
    def union( self , component_size : list , u_node : int , v_node : int ) -> None:
        # Merge the smaller component into the larger one (assignment targets reconstructed).
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node )
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node )
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node )
    def boruvka( self ) -> None:
        # Name reconstructed: this is Boruvka's (Sollin's) minimum spanning tree algorithm.
        component_size = []
        mst_weight = 0
        minimum_weight_edge = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes ):
            self.m_component.update({node: node} )
            component_size.append(1 )
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge , list ):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size , u_component , v_component )
                        print(F'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' )
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(F'''The total weight of the minimal spanning tree is: {mst_weight}''' )
def UpperCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
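# A minimal usage sketch (illustrative graph; method names as restored above):
#
#   g = SCREAMING_SNAKE_CASE(3)   # three nodes: 0, 1, 2
#   g.add_edge(0, 1, 5)
#   g.add_edge(1, 2, 1)
#   g.add_edge(0, 2, 3)
#   g.boruvka()                   # picks edges 0-2 and 1-2; total weight 4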
| 58 | 0 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys( flax_key_tuple , flax_tensor ):
    """simple docstring"""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
        flax_tensor = torch.permute(flax_tensor , (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
    return flax_key_tuple, flax_tensor
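# Example (hypothetical key): ('encoder', 'layers_0', 'mlp', 'wi', 'kernel') with a 2-D
# tensor becomes ('encoder', 'layers_0', 'mlp', 'wi', 'weight') with the tensor
# transposed to torch's (out_features, in_features) layout.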
def get_key_and_tensorstore_dict( layer , checkpoint_info , switch_checkpoint_path ):
    """simple docstring"""
    if "metadata" in layer:
        split_layer = layer.split('''metadata''' )
        curr_real_layer_name = ''''''.join(split_layer[0] )[:-1]
        split_layer = [tuple(('''metadata''' + split_layer[1]).split('''/''' ) )]
    elif "kvstore" in layer:
        split_layer = layer.split('''kvstore''' )
        curr_real_layer_name = ''''''.join(split_layer[0] )[:-1]
        split_layer = [tuple(('''kvstore''' + split_layer[1]).split('''/''' ) )]
    else:
        split_layer = layer.split('''/''' )
        curr_real_layer_name = '''/'''.join(split_layer[:-1] )
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = f'{switch_checkpoint_path}/{checkpoint_info[layer]}'
    elif "kvstore/driver" in layer:
        content = '''file'''
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
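# E.g. a flattened key ending in 'kvstore/path' is split into the real layer name plus
# the ('kvstore', 'path') leaf, and its value is resolved to a path under
# switch_checkpoint_path; 'kvstore/driver' entries are normalized to 'file'.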
def rename_and_save_block( current_block , save_path ):
    """simple docstring"""
    current_block = rename_keys(current_block )
    new_current_block = {}
    for k, v in current_block.items():
        # Key transform reconstructed (assumption): mirror the 'layer' -> 'layers'
        # renaming used by the small conversion script imported above.
        new_current_block[k.replace('''layer''' , '''layers''' )] = v
    current_block = new_current_block
    torch.save(current_block , save_path )
def shard_on_the_fly( switch_checkpoint_path , dump_path , max_shard_size , dtype , weights_name : str = WEIGHTS_NAME ):
"""simple docstring"""
lowercase__ = convert_file_size_to_int(SCREAMING_SNAKE_CASE )
lowercase__ = []
lowercase__ = {}
lowercase__ = 0
lowercase__ = 0
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' , '''rb''' ) as fp:
lowercase__ = serialization.msgpack_restore(fp.read() )['''optimizer''']['''target''']
lowercase__ = flatten_dict(SCREAMING_SNAKE_CASE , sep='''/''' )
lowercase__ = {}
for layer in checkpoint_info.keys():
lowercase__ , lowercase__ , lowercase__ = get_key_and_tensorstore_dict(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if curr_real_layer_name in all_layers:
lowercase__ = content
else:
lowercase__ = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
lowercase__ = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE )
lowercase__ = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
lowercase__ , lowercase__ = rename_base_flax_keys(tuple(key.split('''/''' ) ) , SCREAMING_SNAKE_CASE )
lowercase__ = '''/'''.join(SCREAMING_SNAKE_CASE )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
lowercase__ = os.path.join(
SCREAMING_SNAKE_CASE , weights_name.replace('''.bin''' , f'-{len(SCREAMING_SNAKE_CASE )+1:05d}-of-???.bin' ) )
rename_and_save_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
sharded_state_dicts.append(current_block.keys() )
del current_block
lowercase__ = {}
lowercase__ = 0
lowercase__ = raw_weights.to(getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
lowercase__ = os.path.join(SCREAMING_SNAKE_CASE , weights_name.replace('''.bin''' , f'-{len(SCREAMING_SNAKE_CASE )+1:05d}-of-???.bin' ) )
rename_and_save_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(SCREAMING_SNAKE_CASE ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
lowercase__ = {}
lowercase__ = {}
for idx, shard in enumerate(SCREAMING_SNAKE_CASE ):
lowercase__ = weights_name.replace(
'''.bin''' , f'-{idx+1:05d}-of-{len(SCREAMING_SNAKE_CASE ):05d}.bin' ) # len(sharded_state_dicts):05d}
lowercase__ = os.path.join(SCREAMING_SNAKE_CASE , weights_name.replace('''.bin''' , f'-{idx+1:05d}-of-???.bin' ) )
os.rename(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ = shard
for key in shard:
lowercase__ = shard_file
# Add the metadata
lowercase__ = {'''total_size''': total_size}
lowercase__ = {'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' , encoding='''utf-8''' ) as f:
lowercase__ = json.dumps(SCREAMING_SNAKE_CASE , indent=2 , sort_keys=SCREAMING_SNAKE_CASE ) + '''\n'''
f.write(SCREAMING_SNAKE_CASE )
return metadata, index
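# The returned index follows the standard sharded-checkpoint layout: a 'metadata' entry
# holding the total byte size plus a 'weight_map' from parameter name to shard file.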
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check( ):
    """simple docstring"""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer
    config = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''' )
    config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''' )
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        '''/home/arthur_huggingface_co/transformers/switch_converted''' , device_map='''auto''' )
    tokenizer = T5Tokenizer.from_pretrained('''t5-small''' )
    text = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
    input_ids = tokenizer(text , return_tensors='''pt''' ).input_ids
    out = model.generate(input_ids , decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
| 43 |
def base16_encode( data : bytes ) -> str:
    # Name reconstructed from the module's purpose; hex-encodes each byte, uppercase.
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def base16_decode( data : str ) -> bytes:
    # Name reconstructed from the module's purpose.
    if (len(data ) % 2) != 0:
        raise ValueError(
            '''Base16 encoded data is invalid:\nData does not have an even number of hex digits.''' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set('''0123456789ABCDEF''' ):
        raise ValueError(
            '''Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.''' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
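# Round-trip example (names as fixed above):
#   base16_encode(b'Hello')      == '48656C6C6F'
#   base16_decode('48656C6C6F')  == b'Hello'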
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime( number : int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def compute_nums( n : int ) -> list[int]:
    if not isinstance(n , int ):
        raise ValueError('n must be an integer' )
    if n <= 0:
        raise ValueError('n must be >= 0' )
    list_nums = []
    for num in range(len(odd_composites ) ):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rest = odd_composites[num] - 2 * i * i
            if is_prime(rest ):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num] )
        if len(list_nums ) == n:
            return list_nums
    return []
def solution( ) -> int:
    return compute_nums(1 )[0]
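# For reference, the answer to Project Euler 46 (the smallest odd composite that cannot
# be written as a prime plus twice a square) is documented as 5777.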
if __name__ == "__main__":
print(F"""{solution() = }""")
| 395 |
"""simple docstring"""
from __future__ import annotations
def resistor_parallel( resistors : list[float] ) -> float:
    # Name reconstructed; computes the equivalent resistance of resistors in parallel.
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = F'''Resistor at index {index} has a negative or zero value!'''
            raise ValueError(msg )
        first_sum += 1 / float(resistor )
        index += 1
    return 1 / first_sum
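# Worked example: 2, 4 and 4 ohms in parallel give 1 / (1/2 + 1/4 + 1/4) = 1.0 ohm.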
def resistor_series( resistors : list[float] ) -> float:
    # Name reconstructed; computes the equivalent resistance of resistors in series.
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = F'''Resistor at index {index} has a negative value!'''
            raise ValueError(msg )
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
| 395 | 1 |
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments( path : str , n_shave_prefix_segments : int = 1 ):
    '''simple docstring'''
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split("." )[n_shave_prefix_segments:] )
    else:
        return ".".join(path.split("." )[:n_shave_prefix_segments] )
def renew_resnet_paths( old_list : list , n_shave_prefix_segments : int = 0 ):
    '''simple docstring'''
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0" , "norm1" )
        new_item = new_item.replace("in_layers.2" , "conv1" )
        new_item = new_item.replace("out_layers.0" , "norm2" )
        new_item = new_item.replace("out_layers.3" , "conv2" )
        new_item = new_item.replace("emb_layers.1" , "time_emb_proj" )
        new_item = new_item.replace("skip_connection" , "conv_shortcut" )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({"old": old_item, "new": new_item} )
    return mapping
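# Example: 'in_layers.0.weight' maps to 'norm1.weight'; each (old, new) pair is
# collected in `mapping` for assign_to_checkpoint below.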
def renew_attention_paths( old_list : list , n_shave_prefix_segments : int = 0 ):
    '''simple docstring'''
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight" , "group_norm.weight" )
        new_item = new_item.replace("norm.bias" , "group_norm.bias" )
        new_item = new_item.replace("proj_out.weight" , "proj_attn.weight" )
        new_item = new_item.replace("proj_out.bias" , "proj_attn.bias" )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({"old": old_item, "new": new_item} )
    return mapping
def assign_to_checkpoint( paths , checkpoint , old_checkpoint , attention_paths_to_split=None , additional_replacements=None , config=None ):
    '''simple docstring'''
    assert isinstance(paths , list ), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
            query, key, value = old_tensor.split(channels // num_heads , dim=1 )
            checkpoint[path_map["query"]] = query.reshape(target_shape )
            checkpoint[path_map["key"]] = key.reshape(target_shape )
            checkpoint[path_map["value"]] = value.reshape(target_shape )
    for path in paths:
        new_path = path["new"]
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace("middle_block.0" , "mid_block.resnets.0" )
        new_path = new_path.replace("middle_block.1" , "mid_block.attentions.0" )
        new_path = new_path.replace("middle_block.2" , "mid_block.resnets.1" )
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"] , replacement["new"] )
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint( checkpoint : dict , config : dict ):
'''simple docstring'''
snake_case_ = {}
snake_case_ = checkpoint["time_embed.0.weight"]
snake_case_ = checkpoint["time_embed.0.bias"]
snake_case_ = checkpoint["time_embed.2.weight"]
snake_case_ = checkpoint["time_embed.2.bias"]
snake_case_ = checkpoint["input_blocks.0.0.weight"]
snake_case_ = checkpoint["input_blocks.0.0.bias"]
snake_case_ = checkpoint["out.0.weight"]
snake_case_ = checkpoint["out.0.bias"]
snake_case_ = checkpoint["out.2.weight"]
snake_case_ = checkpoint["out.2.bias"]
# Retrieves the keys for the input blocks only
snake_case_ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "input_blocks" in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key]
for layer_id in range(snake_case )
}
# Retrieves the keys for the middle blocks only
snake_case_ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "middle_block" in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key]
for layer_id in range(snake_case )
}
# Retrieves the keys for the output blocks only
snake_case_ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "output_blocks" in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key]
for layer_id in range(snake_case )
}
for i in range(1 , snake_case ):
snake_case_ = (i - 1) // (config["num_res_blocks"] + 1)
snake_case_ = (i - 1) % (config["num_res_blocks"] + 1)
snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key]
snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key]
if f'input_blocks.{i}.0.op.weight' in checkpoint:
snake_case_ = checkpoint[
f'input_blocks.{i}.0.op.weight'
]
snake_case_ = checkpoint[
f'input_blocks.{i}.0.op.bias'
]
continue
snake_case_ = renew_resnet_paths(snake_case )
snake_case_ = {"old": f'input_blocks.{i}.0', "new": f'down_blocks.{block_id}.resnets.{layer_in_block_id}'}
snake_case_ = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
assign_to_checkpoint(
snake_case , snake_case , snake_case , additional_replacements=[meta_path, resnet_op] , config=snake_case )
if len(snake_case ):
snake_case_ = renew_attention_paths(snake_case )
snake_case_ = {
"old": f'input_blocks.{i}.1',
"new": f'down_blocks.{block_id}.attentions.{layer_in_block_id}',
}
snake_case_ = {
f'input_blocks.{i}.1.qkv.bias': {
"key": f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
"query": f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
"value": f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'input_blocks.{i}.1.qkv.weight': {
"key": f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
"query": f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
"value": f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
snake_case , snake_case , snake_case , additional_replacements=[meta_path] , attention_paths_to_split=snake_case , config=snake_case , )
snake_case_ = middle_blocks[0]
snake_case_ = middle_blocks[1]
snake_case_ = middle_blocks[2]
snake_case_ = renew_resnet_paths(snake_case )
assign_to_checkpoint(snake_case , snake_case , snake_case , config=snake_case )
snake_case_ = renew_resnet_paths(snake_case )
assign_to_checkpoint(snake_case , snake_case , snake_case , config=snake_case )
snake_case_ = renew_attention_paths(snake_case )
snake_case_ = {
"middle_block.1.qkv.bias": {
"key": "mid_block.attentions.0.key.bias",
"query": "mid_block.attentions.0.query.bias",
"value": "mid_block.attentions.0.value.bias",
},
"middle_block.1.qkv.weight": {
"key": "mid_block.attentions.0.key.weight",
"query": "mid_block.attentions.0.query.weight",
"value": "mid_block.attentions.0.value.weight",
},
}
assign_to_checkpoint(
snake_case , snake_case , snake_case , attention_paths_to_split=snake_case , config=snake_case )
for i in range(snake_case ):
snake_case_ = i // (config["num_res_blocks"] + 1)
snake_case_ = i % (config["num_res_blocks"] + 1)
snake_case_ = [shave_segments(snake_case , 2 ) for name in output_blocks[i]]
snake_case_ = {}
for layer in output_block_layers:
snake_case_ , snake_case_ = layer.split("." )[0], shave_segments(snake_case , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(snake_case )
else:
snake_case_ = [layer_name]
if len(snake_case ) > 1:
snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key]
snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key]
snake_case_ = renew_resnet_paths(snake_case )
snake_case_ = renew_resnet_paths(snake_case )
snake_case_ = {"old": f'output_blocks.{i}.0', "new": f'up_blocks.{block_id}.resnets.{layer_in_block_id}'}
assign_to_checkpoint(snake_case , snake_case , snake_case , additional_replacements=[meta_path] , config=snake_case )
if ["conv.weight", "conv.bias"] in output_block_list.values():
snake_case_ = list(output_block_list.values() ).index(["conv.weight", "conv.bias"] )
snake_case_ = checkpoint[
f'output_blocks.{i}.{index}.conv.weight'
]
snake_case_ = checkpoint[
f'output_blocks.{i}.{index}.conv.bias'
]
# Clear attentions as they have been attributed above.
if len(snake_case ) == 2:
snake_case_ = []
if len(snake_case ):
snake_case_ = renew_attention_paths(snake_case )
snake_case_ = {
"old": f'output_blocks.{i}.1',
"new": f'up_blocks.{block_id}.attentions.{layer_in_block_id}',
}
snake_case_ = {
f'output_blocks.{i}.1.qkv.bias': {
"key": f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
"query": f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
"value": f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'output_blocks.{i}.1.qkv.weight': {
"key": f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
"query": f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
"value": f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
snake_case , snake_case , snake_case , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("qkv" in key for key in attentions ) else None , config=snake_case , )
else:
snake_case_ = renew_resnet_paths(snake_case , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
snake_case_ = ".".join(["output_blocks", str(snake_case ), path["old"]] )
snake_case_ = ".".join(["up_blocks", str(snake_case ), "resnets", str(snake_case ), path["new"]] )
snake_case_ = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
_SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
_SCREAMING_SNAKE_CASE : List[Any] = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
_SCREAMING_SNAKE_CASE : Any = json.loads(f.read())
_SCREAMING_SNAKE_CASE : Optional[int] = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
_SCREAMING_SNAKE_CASE : Optional[int] = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
_SCREAMING_SNAKE_CASE : List[str] = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
_SCREAMING_SNAKE_CASE : str = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))
_SCREAMING_SNAKE_CASE : List[str] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
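# A minimal invocation sketch (placeholder paths; assumes this script is saved
# as convert_ldm_checkpoint.py -- the flag names come from the parser above):
#
#   python convert_ldm_checkpoint.py \
#       --checkpoint_path ./ldm/model.ckpt \
#       --config_file ./ldm/config.json \
#       --dump_path ./converted_model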
| 400 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    """Construct an XLM-RoBERTa tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
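# Usage sketch (illustrative only; the model name is the standard public
# checkpoint, and the exact ids produced depend on the downloaded vocab):
#
#   tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#   ids = tokenizer.encode("Hello world")  # wrapped as <s> ... </s> by build_inputs_with_special_tokens
#   tokens = tokenizer.convert_ids_to_tokens(ids)
#   tokenizer.save_vocabulary("./saved_tokenizer")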
| 400 | 1 |
'''simple docstring'''
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as a dictionary of adjacency lists. The source
        vertex has to be defined upon initialization."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Runs breadth first search from the source vertex, filling self.parent."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Returns the shortest path from the source vertex to target_vertex as
        a string of the form `v1->v2->...->vn`, or raises ValueError if no path
        exists."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
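# Expected behaviour (a quick sanity check read off the adjacency lists above,
# not the captured output of a run): with source vertex "G", BFS discovers D via
# G->C->A->B->D, so g.shortest_path("D") prints "G->C->A->B->D" and
# g.shortest_path("G") prints "G"; g.shortest_path("Foo") raises ValueError
# because "Foo" is unreachable, so the last print never completes.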
| 593 |
'''simple docstring'''
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=False)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                # No point gathering the predictions if there are no metrics, otherwise we defer to
                # self.args.prediction_loss_only
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)

        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
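# Usage sketch (hypothetical objects: `model`, `training_args`, the datasets and
# the post-processing/metric functions would come from the surrounding QA
# example scripts; this is not a runnable snippet on its own):
#
#   trainer = QuestionAnsweringTrainer(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       eval_examples=eval_examples,
#       post_process_function=post_processing_function,
#       compute_metrics=compute_metrics,
#       quant_trainer_args=quant_trainer_args,
#   )
#   trainer.calibrate()                    # post-training-quantization calibration
#   metrics = trainer.evaluate()
#   trainer.save_onnx(output_dir=training_args.output_dir)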
| 593 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}


class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 677 |
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
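# Example invocation (placeholder paths; assumes this script is saved as
# convert_t5x_checkpoint_to_pytorch.py -- the flag names come from the parser above):
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path ./converted_model \
#       --scalable_attention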
| 677 | 1 |
"""simple docstring"""
import math
import qiskit
def quantum_full_adder(
    input_1: int = 1, input_2: int = 1, carry_in: int = 1
) -> qiskit.result.counts.Counts:
    """
    Quantum full adder: adds input_1, input_2 and carry_in on a 4-qubit circuit
    and measures the resulting sum and carry bits.
    """
    if isinstance(input_1, str) or isinstance(input_2, str) or isinstance(carry_in, str):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")

    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)

    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)

    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(f'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
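# With the deterministic default inputs (1, 1, 1) the adder computes 1 + 1 + 1 = 3,
# i.e. sum = 1 and carry = 1, so the expected counts are {'11': 1000} -- an
# expectation read off the circuit above, not the output of a verified run.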
| 716 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])

        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
    def test_is_control(self):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual(
            [tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
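# To run this module locally (the path is an assumption about the usual
# transformers test layout; RUN_SLOW additionally enables the @slow test):
#
#   RUN_SLOW=1 pytest tests/models/roc_bert/test_tokenization_roc_bert.py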
| 118 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json',
},
'merges_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt',
},
'tokenizer_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'gpt2': 1024,
'gpt2-medium': 1024,
'gpt2-large': 1024,
'gpt2-xl': 1024,
'distilgpt2': 1024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" GPT-2 tokenizer, backed by HuggingFace's tokenizers library (byte-level BPE)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
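# Usage sketch (illustrative; the exact ids depend on the downloaded vocab):
#
#   tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
#   tokenizer("Hello world")["input_ids"]
#
#   # Pretokenized input requires add_prefix_space=True (see the asserts above):
#   tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
#   tokenizer(["Hello", "world"], is_split_into_words=True)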
| 379 |
'''simple docstring'''
def solution():
    """Builds the first million digits of the Champernowne constant and returns
    the product of the digits at positions 1, 10, 100, ..., 1000000."""
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
if __name__ == "__main__":
print(solution())
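# For reference, the digits involved are d_1=1, d_10=1, d_100=5, d_1000=3,
# d_10000=7, d_100000=2, d_1000000=1, so solution() should return
# 1 * 1 * 5 * 3 * 7 * 2 * 1 = 210 (Project Euler problem 40).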
| 394 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1_024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
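# These integration tests are @slow; in the transformers suite they only run
# when RUN_SLOW is set, e.g. (the path is an assumption about the test layout):
#
#   RUN_SLOW=1 pytest tests/models/xlm_roberta/test_modeling_xlm_roberta.py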
| 704 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves the multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group("""nccl""")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(f"""{gpu} is broken""")
raise
| 151 | 0 |
"""simple docstring"""
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dataset_from_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
| 83 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    """Build the (old_name, new_name) pairs used to rename the timm state dict."""
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split the fused timm qkv projection into separate query/key/value tensors."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak a timm hybrid ViT checkpoint into the HuggingFace ViTHybrid structure."""
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1_000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 598 | 0 |
'''simple docstring'''
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the self-avoiding paths from (row, col) to the bottom-right cell of `grid`."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
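# Usage sketch (an addition, not part of the original file): count the
# self-avoiding paths from the top-left to the bottom-right corner of a
# small grid, where 1 marks a blocked cell.
example_grid = [
    [0, 0, 0],
    [0, 1, 0],
    [0, 0, 0],
]
print(depth_first_search(example_grid, 0, 0, set()))  # number of distinct paths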
| 710 |
'''simple docstring'''
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """Base class from which `.generate()` streamers should inherit."""

    def put(self, value):
        """Called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    """Decodes tokens as they arrive and prints text as soon as whole words are formed."""

    def __init__(self, tokenizer, skip_prompt=False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text, stream_end=False):
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Checks whether the codepoint `cp` falls inside a CJK unicode block."""
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True
        return False


class TextIteratorStreamer(TextStreamer):
    """Streamer that stores finalized text in a queue, to be consumed as an iterator."""

    def __init__(self, tokenizer, skip_prompt=False, timeout=None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text, stream_end=False):
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
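# Usage sketch (an addition, not in the original file): consume tokens from
# `generate` as they are produced. The gpt2 checkpoint is only illustrative;
# any transformers causal LM that accepts a `streamer` argument works.
#
# from threading import Thread
# from transformers import AutoModelForCausalLM, AutoTokenizer
#
# tok = AutoTokenizer.from_pretrained("gpt2")
# model = AutoModelForCausalLM.from_pretrained("gpt2")
# streamer = TextIteratorStreamer(tok, skip_prompt=True)
# inputs = tok("An increasing sequence: one,", return_tensors="pt")
# Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20)).start()
# for new_text in streamer:  # yields decoded text chunks until the stop signal
#     print(new_text, end="")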
| 350 | 0 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df, partition_order):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df, partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator):
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id, num_workers):
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df, cache_dir=None, working_dir=None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )

    def _validate_cache_dir(self):
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 1_00 else 1_00
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(self, fpath, file_format, max_shard_size):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(self, split_generator, file_format="arrow", max_shard_size=None, num_proc=None, **kwargs):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)

        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")

        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id, shard_id, global_shard_id):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(self, split_generator):
        return SparkExamplesIterable(self.df)
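# Usage sketch (an addition, not in the original file): this builder is the
# machinery behind `datasets.Dataset.from_spark`. Assuming a local Spark
# session is available:
#
# from pyspark.sql import SparkSession
# from datasets import Dataset
#
# spark = SparkSession.builder.master("local[*]").getOrCreate()
# df = spark.createDataFrame([("hello", 0), ("world", 1)], ["text", "label"])
# ds = Dataset.from_spark(df)  # materializes the DataFrame as an Arrow-backed dataset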
| 333 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_sew"] = [
'''SEW_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SEWForCTC''',
'''SEWForSequenceClassification''',
'''SEWModel''',
'''SEWPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 172 | 0 |
import torch
def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")
if __name__ == "__main__":
main()
| 703 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class LongformerConfig(PretrainedConfig):
    """Configuration class for Longformer models."""

    model_type = "longformer"

    def __init__(self, attention_window: Union[List[int], int] = 512, sep_token_id: int = 2, pad_token_id: int = 1,
                 bos_token_id: int = 0, eos_token_id: int = 2, vocab_size: int = 30_522, hidden_size: int = 768,
                 num_hidden_layers: int = 12, num_attention_heads: int = 12, intermediate_size: int = 3_072,
                 hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1,
                 max_position_embeddings: int = 512, type_vocab_size: int = 2, initializer_range: float = 0.02,
                 layer_norm_eps: float = 1e-12, onnx_export: bool = False, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(self, tokenizer: "PreTrainedTokenizerBase", batch_size: int = -1, seq_length: int = -1,
                              is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
| 26 | 0 |
"""simple docstring"""
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Divide-and-conquer maximum of nums[left:right + 1]."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
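# Worked example (an addition): the recursion splits [3, 7, 1, 9] into
# [3, 7] and [1, 9], takes the maximum of each half (7 and 9), and keeps 9.
assert find_max([3, 7, 1, 9], 0, 3) == 9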
| 575 |
"""simple docstring"""
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of the proper divisors of `input_num` (excluding the number itself)."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
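# Worked example (an addition): the proper divisors of 28 are 1, 2, 4, 7 and
# 14, which sum back to 28, making it a perfect number.
assert sum_of_divisors(28) == 28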
| 575 | 1 |
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """Classic subset-sum dynamic program: subset[i][j] is True when some subset of arr[:i] sums to j."""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
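# Worked example (an addition): from [3, 34, 4, 12, 5, 2] the subset {4, 5}
# reaches 9, while no subset reaches 30 (34 alone overshoots and the rest
# sum to at most 26).
assert is_sum_subset([3, 34, 4, 12, 5, 2], 9) is True
assert is_sum_subset([3, 34, 4, 12, 5, 2], 30) is False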
| 82 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
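# Example invocation (an addition; the script name and checkpoint paths are
# placeholders for wherever the files live on your machine):
#
#   python convert_xlnet_checkpoint.py \
#     --tf_checkpoint_path ./xlnet_cased_L-12_H-768_A-12 \
#     --xlnet_config_file ./xlnet_cased_L-12_H-768_A-12/xlnet_config.json \
#     --pytorch_dump_folder_path ./xlnet-base-cased \
#     --finetuning_task sst-2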
| 82 | 1 |
"""simple docstring"""
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    s = str(n)
    return s == s[::-1]


def solution(limit: int = 1_000_000) -> int:
    """Sum the numbers below `limit` that are palindromic in both base 10 and base 2."""
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
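# Worked example (an addition): 585 reads the same both ways in base 10 and,
# as 0b1001001001, in base 2, so it contributes to the total.
assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])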
if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
| 52 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''',
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for ConvNeXt V2 models."""

    model_type = "convnextv2"

    def __init__(self, num_channels=3, patch_size=4, num_stages=4, hidden_sizes=None, depths=None, hidden_act="gelu",
                 initializer_range=0.02, layer_norm_eps=1e-12, drop_path_rate=0.0, image_size=224, out_features=None,
                 out_indices=None, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 52 | 1 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ImageTextProcessor(ProcessorMixin):
    """Wraps an auto image processor and an auto tokenizer into a single callable processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 701 |
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    """Bad-character-heuristic variant of Boyer-Moore string search."""

    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Rightmost mismatching text position for the window at `current_pos`, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
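# Worked check (an addition): "AB" occurs in "ABAABA" at offsets 0 and 3;
# every other alignment fails on the trailing "B".
assert BoyerMooreSearch("ABAABA", "AB").bad_character_heuristic() == [0, 3]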
| 637 | 0 |
'''simple docstring'''
import torch
from diffusers import DiffusionPipeline
class UnetSchedulerOneForwardPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
| 189 |
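# Usage sketch (an addition, not in the original file): pair the pipeline
# with a small unconditional UNet and a DDPM scheduler; the exact UNet
# configuration below is illustrative, not prescribed by the source.
#
# from diffusers import DDPMScheduler, UNet2DModel
#
# unet = UNet2DModel(sample_size=8, in_channels=3, out_channels=3)
# pipe = UnetSchedulerOneForwardPipeline(unet=unet, scheduler=DDPMScheduler())
# out = pipe()  # a tensor of ones shaped like one unet sample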
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    """Build padded train/eval dataloaders for GLUE MRPC."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
main() | 189 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if inductance < 0:
raise ValueError('Inductance cannot be negative' )
if frequency < 0:
raise ValueError('Frequency cannot be negative' )
if reactance < 0:
raise ValueError('Inductive reactance cannot be negative' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
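# Worked example (an addition): a 10 mH inductor driven at 1 kHz has an
# inductive reactance of 2 * pi * 1000 * 0.01, roughly 62.8 ohms.
print(ind_reactance(inductance=0.01, frequency=1000, reactance=0))
# {'reactance': 62.83185307179586}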
| 263 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    """Wraps a Speech2Text feature extractor and a Speech2Text tokenizer into a single processor."""

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
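# Usage sketch (an addition, not in the original file): one call prepares the
# audio features and the tokenized transcription; the checkpoint name and the
# `waveform` array are illustrative.
#
# processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
# batch = processor(audio=waveform, sampling_rate=16000, text="hello world")
# batch["input_features"], batch["attention_mask"]  # from the feature extractor
# batch["labels"]                                   # token ids of the transcription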
| 263 | 1 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 30 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice received.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
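# Added illustrative sketch (not in the original script): a self-contained toy
# version of the flatten -> pad -> un-flatten shape walk performed by the
# collator above. The helper name is hypothetical and safe to delete.
def _collator_shape_demo():
    batch_size, num_choices, seq_len = 2, 4, 5
    # 2 examples x 4 choices are flattened to 8 rows, padded to length 5 ...
    flat = torch.zeros(batch_size * num_choices, seq_len)
    # ... and viewed back to (batch, choices, seq_len), the layout
    # AutoModelForMultipleChoice expects.
    return flat.view(batch_size, num_choices, -1).shape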
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag", "regular", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
        )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences, second_sentences, truncation=True, max_length=max_seq_length, padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
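    # Added note (not in the original script): with SWAG, one raw example
    # {"sent1": c, "sent2": q, "ending0".."ending3": e0..e3} expands to four
    # (c, f"{q} {e_i}") sentence pairs; tokenizing the flattened lists and
    # regrouping in chunks of 4 restores one row per original example.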
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
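# Added illustrative note (not in the original script): a typical invocation,
# hedged as an example only — the checkpoint and flag values are placeholders.
#
#   python run_swag.py \
#     --model_name_or_path bert-base-uncased \
#     --do_train --do_eval \
#     --learning_rate 5e-5 \
#     --per_device_train_batch_size 16 \
#     --output_dir /tmp/swag_out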
| 139 | 0 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
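# Added illustrative sketch (not part of the original file): constructing the
# config by hand. The semantic-segmentation head class is assumed to live
# alongside this config in the library.
#
#   config = SegformerConfig(num_labels=150)
#   # from transformers import SegformerForSemanticSegmentation
#   # model = SegformerForSemanticSegmentation(config)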
| 704 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
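# Added note (not in the original script): an example of the renames the loops
# above perform, derived from their meta_path replacements — an LDM key such as
# "encoder.down.0.block.1.norm1.weight" ends up as
# "encoder.down_blocks.0.resnets.1.norm1.weight" in the diffusers layout.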
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to store the converted model.')
snake_case_ = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
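# Added illustrative note (not in the original script; the script filename is
# assumed): invocation follows the two required flags defined above.
#
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae_diffusers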
| 388 | 0 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")


CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints() | 545 |
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])

        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])

        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 387 | 0 |
""" REALM model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,  # 288 + 32
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
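# Added illustrative note (not part of the original file): the three sections
# above group REALM's knobs by stage — embedder/encoder/scorer (common),
# reader, and retrieval.
#
#   config = RealmConfig()       # defaults as in __init__
#   config.num_candidates        # 8 candidate blocks scored per query
#   config.reader_beam_size      # top-5 blocks handed to the reader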
| 355 |
"""Fine-tune a GPT-2 language model with Information Gain Filtration (IGF) on WikiText data."""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler

from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)

    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    set_seed(42)

    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")

    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)

    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0

    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)

                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file", type=str, default=None, help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file", type=str, default=None, help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len", default=32, type=int, help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set", default=100, type=int, help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size", default=128, type=int, help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval", default=10, type=int, help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold", default=1.0, type=float, help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument(
        "--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name"
    )
    parser.add_argument(
        "--recopy_model", default=recopy_gpt2, type=str, help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )
if __name__ == "__main__":
main()
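# A minimal, hypothetical sketch of the batch filtering that the flags above
# configure: a secondary learner scores candidate batches, and only batches
# whose predicted information gain clears a mean + num_std * std threshold are
# kept for the fine-tuning step. `secondary_learner` is assumed to map a batch
# to a scalar score, and torch is assumed imported earlier in this file; the
# names here are illustrative, not this script's own API.
def select_informative_batches(batches, secondary_learner, num_std=1.0):
    with torch.no_grad():
        scores = torch.tensor([float(secondary_learner(b)) for b in batches])
    threshold = scores.mean() + num_std * scores.std()
    return [b for b, s in zip(batches, scores) if s >= threshold]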
| 355 | 1 |
from typing import List
from .keymap import KEYMAP, get_character
def a__ ( snake_case ):
"""simple docstring"""
def decorator(snake_case ):
__SCREAMING_SNAKE_CASE : int = getattr(__UpperCamelCase , '''handle_key''' , [] )
handle += [key]
setattr(__UpperCamelCase , '''handle_key''' , __UpperCamelCase )
return func
return decorator
def a__ ( *snake_case ):
"""simple docstring"""
def decorator(snake_case ):
__SCREAMING_SNAKE_CASE : Optional[Any] = getattr(__UpperCamelCase , '''handle_key''' , [] )
handle += keys
setattr(__UpperCamelCase , '''handle_key''' , __UpperCamelCase )
return func
return decorator
class __UpperCamelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __new__( cls : Optional[Any] , _A : Union[str, Any] , _A : Any , _A : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = super().__new__(cls , _lowercase , _lowercase , _lowercase )
if not hasattr(_lowercase , '''key_handler''' ):
setattr(_lowercase , '''key_handler''' , {} )
setattr(_lowercase , '''handle_input''' , KeyHandler.handle_input )
for value in attrs.values():
__SCREAMING_SNAKE_CASE : Optional[Any] = getattr(_lowercase , '''handle_key''' , [] )
for key in handled_keys:
__SCREAMING_SNAKE_CASE : Optional[Any] = value
return new_cls
@staticmethod
def UpperCAmelCase__ ( cls : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = get_character()
if char != KEYMAP["undefined"]:
__SCREAMING_SNAKE_CASE : List[Any] = ord(_lowercase )
__SCREAMING_SNAKE_CASE : int = cls.key_handler.get(_lowercase )
if handler:
__SCREAMING_SNAKE_CASE : Tuple = char
return handler(cls )
else:
return None
def a__ ( cls ):
"""simple docstring"""
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
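# A self-contained sketch of the same pattern with readable names (the snippet
# above uses mangled identifiers): a decorator tags methods with the keys they
# handle, and a metaclass collects the tags into a per-class dispatch table,
# mirroring the `handle_key` / `key_handler` attributes above.
def mark_sketch(key):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle.append(key)
        func.handle_key = handle
        return func
    return decorator

class KeyHandlerMetaSketch(type):
    def __new__(mcls, name, bases, attrs):
        new_cls = super().__new__(mcls, name, bases, attrs)
        new_cls.key_handler = {}
        for value in attrs.values():
            for key in getattr(value, "handle_key", []):
                new_cls.key_handler[key] = value
        return new_cls

class MenuSketch(metaclass=KeyHandlerMetaSketch):
    @mark_sketch("q")
    def quit(self):
        return "quit"

assert "q" in MenuSketch.key_handler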
| 74 |
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : str , __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : Optional[Any] = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
snake_case_ : Optional[int] = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
snake_case_ : Optional[Any] = F'{src_lang}-{tgt_lang}'
snake_case_ : Dict = F'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
snake_case_ : List[str] = os.path.join(__UpperCamelCase , """README.md""" )
print(F'Generating {path}' )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(__UpperCamelCase )
# make sure we are under the root of the project
__lowerCAmelCase : str = Path(__file__).resolve().parent.parent.parent
__lowerCAmelCase : Optional[int] = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Optional[int] = model_name.split('''-''')
__lowerCAmelCase : Optional[int] = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
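# A tiny, self-contained spot-check of the table templating above: the card's
# eval-results row is `{pair} | {scores[pair][0]} | {scores[pair][1]}`, so for
# en-ru (values copied from the mapping above) it renders as asserted here.
demo_scores = {"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"]}
demo_pair = "en-ru"
assert (
    f"{demo_pair} | {demo_scores[demo_pair][0]} | {demo_scores[demo_pair][1]}"
    == "en-ru | [36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724) | 33.47"
)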
| 58 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
snake_case_ : Optional[int] = logging.get_logger("""transformers.models.encodec""")
snake_case_ : str = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
snake_case_ : Union[str, Any] = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
snake_case_ : Tuple = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
snake_case_ : int = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
snake_case_ : List[Any] = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
snake_case_ : Optional[Any] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
snake_case_ : Optional[Any] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
snake_case_ : Optional[Any] = []
snake_case_ : Dict = []
def lowercase_ ( _lowercase : int , _lowercase : Optional[int] , _lowercase : List[str] , _lowercase : List[str] , _lowercase : Optional[int] ):
'''simple docstring'''
for attribute in key.split("." ):
UpperCAmelCase : Dict = getattr(_lowercase , _lowercase )
if weight_type is not None:
UpperCAmelCase : List[str] = getattr(_lowercase , _lowercase ).shape
else:
UpperCAmelCase : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
UpperCAmelCase : Optional[Any] = value
elif weight_type == "weight_g":
UpperCAmelCase : Optional[int] = value
elif weight_type == "weight_v":
UpperCAmelCase : Optional[int] = value
elif weight_type == "bias":
UpperCAmelCase : Tuple = value
elif weight_type == "running_mean":
UpperCAmelCase : List[Any] = value
elif weight_type == "running_var":
UpperCAmelCase : Union[str, Any] = value
elif weight_type == "num_batches_tracked":
UpperCAmelCase : List[str] = value
elif weight_type == "weight_ih_l0":
UpperCAmelCase : Any = value
elif weight_type == "weight_hh_l0":
UpperCAmelCase : Optional[int] = value
elif weight_type == "bias_ih_l0":
UpperCAmelCase : Union[str, Any] = value
elif weight_type == "bias_hh_l0":
UpperCAmelCase : int = value
elif weight_type == "weight_ih_l1":
UpperCAmelCase : Union[str, Any] = value
elif weight_type == "weight_hh_l1":
UpperCAmelCase : List[Any] = value
elif weight_type == "bias_ih_l1":
UpperCAmelCase : Dict = value
elif weight_type == "bias_hh_l1":
UpperCAmelCase : Union[str, Any] = value
else:
UpperCAmelCase : int = value
logger.info(F"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def lowercase_ ( _lowercase : List[Any] , _lowercase : str ):
'''simple docstring'''
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
UpperCAmelCase : List[str] = key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
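# Self-contained sketch of the three matching modes implemented above
# (`name.*` prefix, `a.*.b` prefix-and-suffix, plain substring), with readable
# names standing in for the mangled ones.
def should_ignore_sketch(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False

assert should_ignore_sketch("encoder.model.0.conv", ["encoder.*"])
assert not should_ignore_sketch("decoder.model.0.conv", ["encoder.*"])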
def lowercase_ ( _lowercase : int , _lowercase : Union[str, Any] , _lowercase : str ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = []
if model_name == "encodec_24khz" or "encodec_32khz":
UpperCAmelCase : Optional[int] = MAPPING_24K
elif model_name == "encodec_48khz":
UpperCAmelCase : int = MAPPING_48K
else:
raise ValueError(F"""Unsupported model: {model_name}""" )
for name, value in orig_dict.items():
if should_ignore(_lowercase , _lowercase ):
logger.info(F"""{name} was ignored""" )
continue
UpperCAmelCase : Tuple = False
for key, mapped_key in MAPPING.items():
if "*" in key:
UpperCAmelCase : List[Any] = key.split(".*." )
if prefix in name and suffix in name:
UpperCAmelCase : Optional[int] = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("embed" ) and name.endswith("embed_avg" ):
continue
UpperCAmelCase : Optional[int] = True
if "*" in mapped_key:
UpperCAmelCase : int = name.split(_lowercase )[0].split("." )[-2]
UpperCAmelCase : str = mapped_key.replace("*" , _lowercase )
if "weight_g" in name:
UpperCAmelCase : str = "weight_g"
elif "weight_v" in name:
UpperCAmelCase : Dict = "weight_v"
elif "weight_ih_l0" in name:
UpperCAmelCase : Optional[int] = "weight_ih_l0"
elif "weight_hh_l0" in name:
UpperCAmelCase : List[str] = "weight_hh_l0"
elif "bias_ih_l0" in name:
UpperCAmelCase : int = "bias_ih_l0"
elif "bias_hh_l0" in name:
UpperCAmelCase : Tuple = "bias_hh_l0"
elif "weight_ih_l1" in name:
UpperCAmelCase : Union[str, Any] = "weight_ih_l1"
elif "weight_hh_l1" in name:
UpperCAmelCase : Optional[Any] = "weight_hh_l1"
elif "bias_ih_l1" in name:
UpperCAmelCase : str = "bias_ih_l1"
elif "bias_hh_l1" in name:
UpperCAmelCase : str = "bias_hh_l1"
elif "bias" in name:
UpperCAmelCase : str = "bias"
elif "weight" in name:
UpperCAmelCase : List[str] = "weight"
elif "running_mean" in name:
UpperCAmelCase : str = "running_mean"
elif "running_var" in name:
UpperCAmelCase : Optional[Any] = "running_var"
elif "num_batches_tracked" in name:
UpperCAmelCase : List[Any] = "num_batches_tracked"
else:
UpperCAmelCase : Any = None
set_recursively(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
continue
if not is_used:
unused_weights.append(_lowercase )
logger.warning(F"""Unused weights: {unused_weights}""" )
@torch.no_grad()
def lowercase_ ( _lowercase : List[str] , _lowercase : Any , _lowercase : Optional[int] , _lowercase : Tuple=None , _lowercase : Tuple=None , ):
'''simple docstring'''
if config_path is not None:
UpperCAmelCase : Union[str, Any] = EncodecConfig.from_pretrained(_lowercase )
else:
UpperCAmelCase : Any = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
UpperCAmelCase : str = [8, 5, 4, 4]
UpperCAmelCase : Optional[int] = [2.2]
UpperCAmelCase : Any = 64
UpperCAmelCase : int = 3_20_00
UpperCAmelCase : Optional[int] = 20_48
UpperCAmelCase : Any = False
UpperCAmelCase : int = False
UpperCAmelCase : Optional[int] = False
elif model_name == "encodec_48khz":
UpperCAmelCase : Optional[int] = [8, 5, 4, 2]
UpperCAmelCase : Dict = [3.0, 6.0, 12.0, 24.0]
UpperCAmelCase : List[str] = 4_80_00
UpperCAmelCase : Union[str, Any] = 2
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : Union[str, Any] = "time_group_norm"
UpperCAmelCase : List[Any] = True
UpperCAmelCase : Any = 1.0
UpperCAmelCase : List[str] = 0.0_1
else:
raise ValueError(F"""Unknown model name: {model_name}""" )
UpperCAmelCase : Optional[int] = EncodecModel(_lowercase )
UpperCAmelCase : Optional[int] = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(_lowercase )
UpperCAmelCase : List[Any] = torch.load(_lowercase )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
UpperCAmelCase : str = original_checkpoint["best_state"]
recursively_load_weights(_lowercase , _lowercase , _lowercase )
model.save_pretrained(_lowercase )
if repo_id:
print("Pushing to the hub..." )
feature_extractor.push_to_hub(_lowercase )
model.push_to_hub(_lowercase )
if __name__ == "__main__":
snake_case_ : int = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
snake_case_ : Tuple = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 702 |
"""simple docstring"""
import math
def lowercase_ ( _lowercase : int ):
'''simple docstring'''
return math.sqrt(_lowercase ) * math.sqrt(_lowercase ) == _lowercase
def lowercase_ ( _lowercase : int ):
'''simple docstring'''
left = 0
right = _lowercase
while left <= right:
    mid = (left + right) // 2
    if mid**2 == _lowercase:
        return True
    elif mid**2 > _lowercase:
        right = mid - 1
    else:
        left = mid + 1
return False
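# Sanity reference for both checks above: math.isqrt works on exact integers,
# so it avoids the float-rounding pitfalls the sqrt-based version can hit on
# very large inputs. `_isqrt_reference` is a helper added for illustration.
def _isqrt_reference(n: int) -> bool:
    return n >= 0 and math.isqrt(n) ** 2 == n

assert _isqrt_reference(16) and not _isqrt_reference(15)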
if __name__ == "__main__":
import doctest
doctest.testmod()
| 292 | 0 |
'''simple docstring'''
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def __UpperCamelCase ( lowercase_ : Optional[int] , lowercase_ : Union[str, Any] ):
"""simple docstring"""
a_ = XCLIPTextConfig()
# derive patch size from model name
a_ = model_name.find('patch' )
a_ = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
a_ = XCLIPVisionConfig(patch_size=lowercase_ , num_frames=lowercase_ )
if "large" in model_name:
a_ = 768
a_ = 3_072
a_ = 12
a_ = 1_024
a_ = 4_096
a_ = 16
a_ = 24
a_ = 768
a_ = 3_072
if model_name == "xclip-large-patch14-16-frames":
a_ = 336
a_ = XCLIPConfig.from_text_vision_configs(lowercase_ , lowercase_ )
if "large" in model_name:
a_ = 768
return config
def __UpperCamelCase ( lowercase_ : int ):
"""simple docstring"""
if name == "token_embedding.weight":
a_ = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' )
if name == "positional_embedding":
a_ = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "ln_1" in name:
a_ = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
a_ = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
a_ = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
a_ = name.replace('c_proj' , 'fc2' )
if name.startswith('transformer.resblocks' ):
a_ = name.replace('transformer.resblocks' , 'text_model.encoder.layers' )
if "attn.out_proj" in name and "message" not in name:
a_ = name.replace('attn.out_proj' , 'self_attn.out_proj' )
if "ln_final" in name:
a_ = name.replace('ln_final' , 'text_model.final_layer_norm' )
# visual encoder
if name == "visual.class_embedding":
a_ = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' )
if name == "visual.positional_embedding":
a_ = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' )
if name.startswith('visual.transformer.resblocks' ):
a_ = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' )
if "visual.conv1" in name:
a_ = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' )
if "visual.ln_pre" in name:
a_ = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' )
if "visual.ln_post" in name:
a_ = name.replace('visual.ln_post' , 'vision_model.post_layernorm' )
if "visual.proj" in name:
a_ = name.replace('visual.proj' , 'visual_projection.weight' )
if "text_projection" in name:
a_ = name.replace('text_projection' , 'text_projection.weight' )
# things on top
if "prompts_visual_proj" in name:
a_ = name.replace('prompts_visual_proj' , 'prompts_visual_projection' )
if "prompts_visual_ln" in name:
a_ = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' )
# mit
if name == "mit.positional_embedding":
a_ = name.replace('positional' , 'position' )
if name.startswith('mit.resblocks' ):
a_ = name.replace('mit.resblocks' , 'mit.encoder.layers' )
# prompts generator
if name.startswith('prompts_generator.norm' ):
a_ = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' )
return name
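# A minimal, self-contained sketch of the chained renaming above: each rule
# rewrites the key in place, so the resblocks prefix rule sees the layer-norm
# fix first. Only two of the rules are reproduced here for illustration.
_DEMO_RULES = [
    ("ln_1", "layer_norm1"),
    ("transformer.resblocks", "text_model.encoder.layers"),
]

def rename_key_sketch(name):
    for old, new in _DEMO_RULES:
        name = name.replace(old, new)
    return name

assert (
    rename_key_sketch("transformer.resblocks.0.ln_1.weight")
    == "text_model.encoder.layers.0.layer_norm1.weight"
)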
def __UpperCamelCase ( lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
a_ = orig_state_dict.pop(lowercase_ )
if "attn.in_proj" in key:
a_ = key.split('.' )
if key.startswith('visual' ):
a_ = key_split[3]
a_ = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
a_ = val[
:dim, :
]
a_ = val[
dim : dim * 2, :
]
a_ = val[
-dim:, :
]
else:
a_ = val[
:dim
]
a_ = val[
dim : dim * 2
]
a_ = val[
-dim:
]
else:
if "weight" in key:
a_ = val[
:dim, :
]
a_ = val[
dim : dim * 2, :
]
a_ = val[
-dim:, :
]
else:
a_ = val[:dim]
a_ = val[
dim : dim * 2
]
a_ = val[-dim:]
elif key.startswith('mit' ):
a_ = key_split[2]
a_ = config.vision_config.mit_hidden_size
if "weight" in key:
a_ = val[:dim, :]
a_ = val[dim : dim * 2, :]
a_ = val[-dim:, :]
else:
a_ = val[:dim]
a_ = val[dim : dim * 2]
a_ = val[-dim:]
else:
a_ = key_split[2]
a_ = config.text_config.hidden_size
if "weight" in key:
a_ = val[:dim, :]
a_ = val[
dim : dim * 2, :
]
a_ = val[-dim:, :]
else:
a_ = val[:dim]
a_ = val[
dim : dim * 2
]
a_ = val[-dim:]
else:
a_ = rename_key(lowercase_ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
a_ = val.T
a_ = val
return orig_state_dict
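# Self-contained sketch of the fused-QKV split performed above: a single
# `attn.in_proj` weight of shape (3 * dim, dim) is sliced into the q, k and v
# projections with the same `[:dim]`, `[dim : dim * 2]`, `[-dim:]` slices.
# The underscore names are demo-only; torch is imported at the top of this file.
_dim = 4
_in_proj = torch.randn(3 * _dim, _dim)
_q, _k, _v = _in_proj[:_dim, :], _in_proj[_dim : _dim * 2, :], _in_proj[-_dim:, :]
assert _q.shape == _k.shape == _v.shape == (_dim, _dim)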
def __UpperCamelCase ( lowercase_ : Optional[int] ):
"""simple docstring"""
if num_frames == 8:
a_ = 'eating_spaghetti_8_frames.npy'
elif num_frames == 16:
a_ = 'eating_spaghetti.npy'
elif num_frames == 32:
a_ = 'eating_spaghetti_32_frames.npy'
a_ = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename=lowercase_ , repo_type='dataset' , )
a_ = np.load(lowercase_ )
return list(lowercase_ )
def __UpperCamelCase ( lowercase_ : Optional[Any] , lowercase_ : Any=None , lowercase_ : List[Any]=False ):
"""simple docstring"""
a_ = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
a_ = model_to_url[model_name]
a_ = 8
if "16-frames" in model_name:
a_ = 16
elif "shot" in model_name:
a_ = 32
a_ = get_xclip_config(lowercase_ , lowercase_ )
a_ = XCLIPModel(lowercase_ )
model.eval()
if "drive" in checkpoint_url:
a_ = 'pytorch_model.bin'
gdown.cached_download(lowercase_ , lowercase_ , quiet=lowercase_ )
a_ = torch.load(lowercase_ , map_location='cpu' )['model']
else:
a_ = torch.hub.load_state_dict_from_url(lowercase_ )['model']
a_ = convert_state_dict(lowercase_ , lowercase_ )
a_ = XCLIPModel(lowercase_ )
a_ , a_ = model.load_state_dict(lowercase_ , strict=lowercase_ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
a_ = 336 if model_name == 'xclip-large-patch14-16-frames' else 224
a_ = VideoMAEImageProcessor(size=lowercase_ )
a_ = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
a_ = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
a_ = XCLIPProcessor(image_processor=lowercase_ , tokenizer=lowercase_ )
a_ = prepare_video(lowercase_ )
a_ = processor(
text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=lowercase_ , return_tensors='pt' , padding=lowercase_ )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
a_ = model(**lowercase_ )
# Verify outputs
a_ = outputs.logits_per_video
a_ = logits_per_video.softmax(dim=1 )
print('Probs:' , lowercase_ )
# kinetics-400
if model_name == "xclip-base-patch32":
a_ = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
a_ = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] )
elif model_name == "xclip-base-patch16":
a_ = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
a_ = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] )
elif model_name == "xclip-large-patch14":
a_ = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
a_ = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
a_ = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
a_ = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
a_ = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
a_ = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
a_ = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
a_ = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
a_ = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
a_ = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
a_ = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
a_ = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
a_ = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
a_ = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] )
else:
raise ValueError(F'Model name {model_name} not supported' )
assert torch.allclose(lowercase_ , lowercase_ , atol=1E-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowercase_ )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
model.push_to_hub(lowercase_ , organization='nielsr' )
processor.push_to_hub(lowercase_ , organization='nielsr' )
slow_tokenizer.push_to_hub(lowercase_ , organization='nielsr' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__lowerCAmelCase = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 536 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
__lowerCAmelCase = False
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def _a ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _a ( self ):
"""simple docstring"""
return 12
@property
def _a ( self ):
"""simple docstring"""
return 12
@property
def _a ( self ):
"""simple docstring"""
return 32
@property
def _a ( self ):
"""simple docstring"""
torch.manual_seed(0 )
a_ = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def _a ( self ):
"""simple docstring"""
a_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _a ( self ):
"""simple docstring"""
torch.manual_seed(0 )
a_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(UpperCamelCase__ )
@property
def _a ( self ):
"""simple docstring"""
torch.manual_seed(0 )
a_ = 12
a_ = 12
a_ = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
a_ = TransformeraDModel(**UpperCamelCase__ )
return model
def _a ( self ):
"""simple docstring"""
a_ = 'cpu'
a_ = self.dummy_vqvae
a_ = self.dummy_text_encoder
a_ = self.dummy_tokenizer
a_ = self.dummy_transformer
a_ = VQDiffusionScheduler(self.num_embed )
a_ = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCamelCase__ )
a_ = VQDiffusionPipeline(
vqvae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , transformer=UpperCamelCase__ , scheduler=UpperCamelCase__ , learned_classifier_free_sampling_embeddings=UpperCamelCase__ , )
a_ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
a_ = 'teddy bear playing in the pool'
a_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
a_ = pipe([prompt] , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='np' )
a_ = output.images
a_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
a_ = pipe(
[prompt] , generator=UpperCamelCase__ , output_type='np' , return_dict=UpperCamelCase__ , num_inference_steps=2 )[0]
a_ = image[0, -3:, -3:, -1]
a_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
a_ = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self ):
"""simple docstring"""
a_ = 'cpu'
a_ = self.dummy_vqvae
a_ = self.dummy_text_encoder
a_ = self.dummy_tokenizer
a_ = self.dummy_transformer
a_ = VQDiffusionScheduler(self.num_embed )
a_ = LearnedClassifierFreeSamplingEmbeddings(
learnable=UpperCamelCase__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
a_ = VQDiffusionPipeline(
vqvae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , transformer=UpperCamelCase__ , scheduler=UpperCamelCase__ , learned_classifier_free_sampling_embeddings=UpperCamelCase__ , )
a_ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
a_ = 'teddy bear playing in the pool'
a_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
a_ = pipe([prompt] , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='np' )
a_ = output.images
a_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
a_ = pipe(
[prompt] , generator=UpperCamelCase__ , output_type='np' , return_dict=UpperCamelCase__ , num_inference_steps=2 )[0]
a_ = image[0, -3:, -3:, -1]
a_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
a_ = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def _a ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self ):
"""simple docstring"""
a_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
a_ = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
a_ = pipeline.to(UpperCamelCase__ )
pipeline.set_progress_bar_config(disable=UpperCamelCase__ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
a_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
a_ = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=UpperCamelCase__ , output_type='np' , )
a_ = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 536 | 1 |
'''simple docstring'''
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class A ( _a ):
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
_a = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def __lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
with self.assertRaises(lowerCAmelCase_ ):
_a = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def __lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
with self.assertRaises(lowerCAmelCase_ ):
_a = pa.array(TypedSequence([1, 2, 3] , try_type=Value('''bool''' ) , type=Value('''int64''' ) ) )
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
_a = pa.array(TypedSequence([1, 2, 3] , type=Value('''int32''' ) ) )
self.assertEqual(arr.type , pa.intaa() )
def __lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_a = pa.array(TypedSequence(['''foo''', '''bar'''] , type=Value('''int64''' ) ) )
def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
_a = pa.array(TypedSequence([1, 2, 3] , try_type=Value('''int32''' ) ) )
self.assertEqual(arr.type , pa.intaa() )
def __lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
_a = pa.array(TypedSequence(['''foo''', '''bar'''] , try_type=Value('''int64''' ) ) )
self.assertEqual(arr.type , pa.string() )
def __lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
_a = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , '''int64''' ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , '''int64''' ) )
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_a = pa.array(TypedSequence(['''foo''', '''bar'''] , type=ArrayaD((1, 3) , '''int64''' ) ) )
def __lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
_a = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , '''int64''' ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , '''int64''' ) )
def __lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
_a = pa.array(TypedSequence(['''foo''', '''bar'''] , try_type=ArrayaD((1, 3) , '''int64''' ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
import PIL.Image
_a = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
'''datasets.arrow_writer.cast_to_python_objects''' , side_effect=lowerCAmelCase_ ) as mock_cast_to_python_objects:
_a = pa.array(TypedSequence([{'''path''': None, '''bytes''': B'''image_bytes'''}, pil_image] , type=Image() ) )
_a , _a = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn('''optimize_list_casting''' , lowerCAmelCase_ )
self.assertFalse(kwargs['''optimize_list_casting'''] )
def snake_case_ (UpperCamelCase : Union[str, Any] , UpperCamelCase : int ):
'''simple docstring'''
_a = pa.BufferReader(UpperCamelCase ) if isinstance(UpperCamelCase , pa.Buffer ) else pa.memory_map(UpperCamelCase )
_a = pa.ipc.open_stream(UpperCamelCase )
_a = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
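# Minimal round-trip showing what this helper verifies: a table written to an
# in-memory Arrow IPC stream reads back with the same contents. This demo
# function is added for illustration and is not part of the test suite above.
def _ipc_roundtrip_demo():
    sink = pa.BufferOutputStream()
    table = pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]})
    with pa.ipc.new_stream(sink, table.schema) as writer:
        writer.write_table(table)
    read_back = pa.ipc.open_stream(pa.BufferReader(sink.getvalue())).read_all()
    assert read_back.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}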
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def snake_case_ (UpperCamelCase : Dict , UpperCamelCase : str ):
'''simple docstring'''
_a = pa.BufferOutputStream()
_a = pa.schema(UpperCamelCase ) if fields else None
with ArrowWriter(stream=UpperCamelCase , schema=UpperCamelCase , writer_batch_size=UpperCamelCase ) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1} )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_a = {'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def snake_case_ ():
'''simple docstring'''
_a = pa.BufferOutputStream()
_a = Features({'''labels''': ClassLabel(names=['''neg''', '''pos'''] )} )
with ArrowWriter(stream=UpperCamelCase , features=UpperCamelCase ) as writer:
writer.write({'''labels''': 0} )
writer.write({'''labels''': 1} )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
_a = pa.BufferReader(output.getvalue() )
_a = pa.ipc.open_stream(UpperCamelCase )
_a = f.read_all()
_a = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(UpperCamelCase )
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
def snake_case_ (UpperCamelCase : Any ):
'''simple docstring'''
_a = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase , writer_batch_size=UpperCamelCase , hash_salt='''split_name''' , check_duplicates=UpperCamelCase , ) as writer:
with pytest.raises(UpperCamelCase ):
writer.write({'''col_1''': '''foo''', '''col_2''': 1} , key=[1, 2] )
_a , _a = writer.finalize()
@pytest.mark.parametrize('''writer_batch_size''' , [None, 2, 10] )
def snake_case_ (UpperCamelCase : Any ):
'''simple docstring'''
_a = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase , writer_batch_size=UpperCamelCase , hash_salt='''split_name''' , check_duplicates=UpperCamelCase , ) as writer:
with pytest.raises(UpperCamelCase ):
writer.write({'''col_1''': '''foo''', '''col_2''': 1} , key=10 )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} , key=10 )
_a , _a = writer.finalize()
@pytest.mark.parametrize('''writer_batch_size''' , [None, 2, 10] )
def snake_case_ (UpperCamelCase : int ):
'''simple docstring'''
_a = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase , writer_batch_size=UpperCamelCase , hash_salt='''split_name''' , check_duplicates=UpperCamelCase , ) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1} , key=1 )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} , key=2 )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def snake_case_ (UpperCamelCase : Dict , UpperCamelCase : List[str] ):
'''simple docstring'''
_a = pa.BufferOutputStream()
_a = pa.schema(UpperCamelCase ) if fields else None
with ArrowWriter(stream=UpperCamelCase , schema=UpperCamelCase , writer_batch_size=UpperCamelCase ) as writer:
writer.write_batch({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} )
writer.write_batch({'''col_1''': [], '''col_2''': []} )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_a = {'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def snake_case_ (UpperCamelCase : Any , UpperCamelCase : Any ):
'''simple docstring'''
_a = pa.BufferOutputStream()
_a = pa.schema(UpperCamelCase ) if fields else None
with ArrowWriter(stream=UpperCamelCase , schema=UpperCamelCase , writer_batch_size=UpperCamelCase ) as writer:
writer.write_table(pa.Table.from_pydict({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} ) )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_a = {'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def snake_case_ (UpperCamelCase : int , UpperCamelCase : Optional[int] ):
'''simple docstring'''
_a = pa.BufferOutputStream()
_a = pa.schema(UpperCamelCase ) if fields else None
with ArrowWriter(stream=UpperCamelCase , schema=UpperCamelCase , writer_batch_size=UpperCamelCase ) as writer:
writer.write_row(pa.Table.from_pydict({'''col_1''': ['''foo'''], '''col_2''': [1]} ) )
writer.write_row(pa.Table.from_pydict({'''col_1''': ['''bar'''], '''col_2''': [2]} ) )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_a = {'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def snake_case_ ():
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
_a = {'''col_1''': pa.string(), '''col_2''': pa.intaa()}
_a = os.path.join(UpperCamelCase , '''test.arrow''' )
with ArrowWriter(path=UpperCamelCase , schema=pa.schema(UpperCamelCase ) ) as writer:
writer.write_batch({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(UpperCamelCase , metadata=writer._schema.metadata )
_check_output(UpperCamelCase , 1 )
def snake_case_ (UpperCamelCase : List[str] ):
'''simple docstring'''
if pa.types.is_list(UpperCamelCase ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def snake_case_ (UpperCamelCase : Optional[int] , UpperCamelCase : Any ):
'''simple docstring'''
if isinstance(lst[0] , UpperCamelCase ):
change_first_primitive_element_in_list(lst[0] , UpperCamelCase )
else:
_a = value
@pytest.mark.parametrize('''optimized_int_type, expected_dtype''' , [(None, pa.intaa()), (Value('''int32''' ), pa.intaa())] )
@pytest.mark.parametrize('''sequence''' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def snake_case_ (UpperCamelCase : int , UpperCamelCase : Tuple , UpperCamelCase : Optional[int] ):
'''simple docstring'''
_a = pa.array(TypedSequence(UpperCamelCase , optimized_int_type=UpperCamelCase ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
'''col, expected_dtype''' , [
('''attention_mask''', pa.inta()),
('''special_tokens_mask''', pa.inta()),
('''token_type_ids''', pa.inta()),
('''input_ids''', pa.intaa()),
('''other''', pa.intaa()),
] , )
@pytest.mark.parametrize('''sequence''' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def snake_case_ (UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : List[Any] ):
'''simple docstring'''
_a = pa.array(OptimizedTypedSequence(UpperCamelCase , col=UpperCamelCase ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_a = copy.deepcopy(UpperCamelCase )
_a = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(UpperCamelCase , UpperCamelCase )
_a = pa.array(OptimizedTypedSequence(UpperCamelCase , col=UpperCamelCase ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize('''raise_exception''' , [False, True] )
def snake_case_ (UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
_a = str(tmp_path / '''dataset-train.arrow''' )
try:
with ArrowWriter(path=UpperCamelCase ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def snake_case_ (UpperCamelCase : List[Any] ):
'''simple docstring'''
_a = '''mock://dataset-train.arrow'''
with ArrowWriter(path=UpperCamelCase , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(UpperCamelCase ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({'''col_1''': '''foo''', '''col_2''': 1} )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(UpperCamelCase )
def snake_case_ ():
'''simple docstring'''
_a = pa.BufferOutputStream()
with ParquetWriter(stream=UpperCamelCase ) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1} )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_a = pa.BufferReader(output.getvalue() )
_a = pq.read_table(UpperCamelCase )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize('''embed_local_files''' , [False, True] )
def snake_case_ (UpperCamelCase : List[Any] , UpperCamelCase : List[Any] ):
'''simple docstring'''
import PIL.Image
_a = str(tmp_path / '''test_image_rgb.jpg''' )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(UpperCamelCase , format='''png''' )
_a = pa.BufferOutputStream()
with ParquetWriter(
stream=UpperCamelCase , features=Features({'''image''': Image()} ) , embed_local_files=UpperCamelCase ) as writer:
writer.write({'''image''': image_path} )
writer.finalize()
_a = pa.BufferReader(output.getvalue() )
_a = pq.read_table(UpperCamelCase )
_a = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out['''image'''][0]['''path'''] , UpperCamelCase )
with open(UpperCamelCase , '''rb''' ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def snake_case_ ():
'''simple docstring'''
_a = pa.schema([pa.field('''col_1''' , pa.string() , nullable=UpperCamelCase )] )
_a = pa.BufferOutputStream()
with ArrowWriter(stream=UpperCamelCase ) as writer:
writer._build_writer(inferred_schema=UpperCamelCase )
assert writer._schema == pa.schema([pa.field('''col_1''' , pa.string() )] )
| 377 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'
)
_snake_case : int = None
_snake_case : Tuple = {
'7B': 11008,
'13B': 13824,
'30B': 17920,
'65B': 22016,
'70B': 28672,
}
_snake_case : int = {
'7B': 1,
'7Bf': 1,
'13B': 2,
'13Bf': 2,
'30B': 4,
'65B': 8,
'70B': 8,
'70Bf': 8,
}
def snake_case_ (UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any]=1 , UpperCamelCase : Optional[Any]=256 ):
'''simple docstring'''
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
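# Worked example of the rounding formula above, with readable parameter names
# standing in for the mangled ones: for dim = 4096 it rounds 8 * 4096 / 3 up
# to a multiple of 256, reproducing the 11008 value in the '7B' table above.
def _intermediate_size_demo(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)

assert _intermediate_size_demo(4096) == 11008  # matches the '7B' entry above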
def snake_case_ (UpperCamelCase : Optional[int] ):
'''simple docstring'''
with open(UpperCamelCase , '''r''' ) as f:
return json.load(UpperCamelCase )
def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)

            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )

        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))

    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }
    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
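# Example invocation (my addition; the paths are illustrative placeholders):
#
#     python convert_llama_weights_to_hf.py \
#         --input_dir /path/to/downloaded/llama/weights \
#         --model_size 7B \
#         --output_dir /path/to/output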
| 377 | 1 |
"""simple docstring"""
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class CvtModelTester:
'''simple docstring'''
def __init__( self : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple=13 , _UpperCAmelCase : Tuple=64 , _UpperCAmelCase : str=3 , _UpperCAmelCase : List[Any]=[16, 48, 96] , _UpperCAmelCase : Any=[1, 3, 6] , _UpperCAmelCase : Optional[int]=[1, 2, 10] , _UpperCAmelCase : Optional[Any]=[7, 3, 3] , _UpperCAmelCase : List[Any]=[4, 2, 2] , _UpperCAmelCase : Optional[Any]=[2, 1, 1] , _UpperCAmelCase : Optional[Any]=[2, 2, 2] , _UpperCAmelCase : Dict=[False, False, True] , _UpperCAmelCase : Optional[Any]=[0.0, 0.0, 0.0] , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : Tuple=1E-1_2 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Tuple=2 , ):
_A = parent
_A = batch_size
_A = image_size
_A = patch_sizes
_A = patch_stride
_A = patch_padding
_A = is_training
_A = use_labels
_A = num_labels
_A = num_channels
_A = embed_dim
_A = num_heads
_A = stride_kv
_A = depth
_A = cls_token
_A = attention_drop_rate
_A = initializer_range
_A = layer_norm_eps
def lowerCAmelCase_ ( self : Dict ):
_A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.num_labels )
_A = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self : Optional[int] ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] ):
_A = CvtModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_A = model(_UpperCAmelCase )
_A = (self.image_size, self.image_size)
_A , _A = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
_A = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
_A = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] ):
_A = self.num_labels
_A = CvtForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_A = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : Tuple ):
_A = self.prepare_config_and_inputs()
_A , _A , _A = config_and_inputs
_A = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
UpperCAmelCase : Any = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
UpperCAmelCase : List[Any] = (
{'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase : Tuple = False
UpperCAmelCase : Optional[Any] = False
UpperCAmelCase : List[Any] = False
UpperCAmelCase : Optional[Any] = False
UpperCAmelCase : List[Any] = False
    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
def lowerCAmelCase_ ( self : Optional[int] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase_ ( self : List[str] ):
return
@unittest.skip(reason='Cvt does not output attentions' )
def lowerCAmelCase_ ( self : Dict ):
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def lowerCAmelCase_ ( self : Union[str, Any] ):
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def lowerCAmelCase_ ( self : Optional[Any] ):
pass
def lowerCAmelCase_ ( self : List[str] ):
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(_UpperCAmelCase )
_A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowerCAmelCase_ ( self : str ):
def check_hidden_states_output(_UpperCAmelCase : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any] ):
_A = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_A = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
_A = outputs.hidden_states
_A = len(self.model_tester.depth )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCAmelCase_ ( self : Optional[int] ):
pass
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = CvtModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self : int ):
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowerCAmelCase_ ( self : str ):
_A = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_UpperCAmelCase )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_A = model(**_UpperCAmelCase )
# verify the logits
_A = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
_A = torch.tensor([0.9285, 0.9015, -0.3150] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
| 7 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))

        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(
        self,
        config,
        states,
        actions,
        rewards,
        returns_to_go,
        timesteps,
        attention_mask,
    ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length *3 as there are 3 modalities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generation = False

    # Ignoring of failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        """
        An integration test that performs autoregressive prediction of state, action and return
        from a sequence of states, actions and returns. The test runs over two timesteps.
        """
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )
        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)
            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )
            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))
            state, reward, done, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
| 423 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 423 | 1 |
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.array) -> np.array:
    return vector * sigmoid(1.702 * vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
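# Illustrative usage (my addition, not part of the original module): the
# x * sigmoid(1.702 * x) approximation of GELU tends to 0 for large negative
# inputs, is exactly 0 at x = 0, and tends to the identity for large positive
# inputs.
if __name__ == "__main__":
    print(gaussian_error_linear_unit(np.array([-5.0, 0.0, 5.0])))  # ~[-0.001, 0.0, 4.999]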
| 179 |
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
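# Worked example (my addition): 10, 20 and 30 ohm resistors give 60 ohm in
# series, and 1 / (1/10 + 1/20 + 1/30) = 60/11 ≈ 5.4545 ohm in parallel.
if __name__ == "__main__":
    print(resistor_series([10.0, 20.0, 30.0]))    # 60.0
    print(resistor_parallel([10.0, 20.0, 30.0]))  # 5.4545...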
| 339 | 0 |
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2 = ["A", "B", "C"]
generate_all_permutations(sequence_2)
| 587 | from __future__ import annotations
from typing import Generic, TypeVar
snake_case = TypeVar("T")
class __A ( Generic[T] ):
'''simple docstring'''
def __init__( self , _snake_case ):
_lowerCAmelCase : List[Any] = data
_lowerCAmelCase : Dict = self
_lowerCAmelCase : Tuple = 0
class __A ( Generic[T] ):
'''simple docstring'''
def __init__( self ):
# map from node name to the node object
_lowerCAmelCase : dict[T, DisjointSetTreeNode[T]] = {}
def SCREAMING_SNAKE_CASE__ ( self , _snake_case ):
# create a new set with x as its member
_lowerCAmelCase : List[str] = DisjointSetTreeNode(_snake_case )
def SCREAMING_SNAKE_CASE__ ( self , _snake_case ):
# find the set x belongs to (with path-compression)
_lowerCAmelCase : Dict = self.map[data]
if elem_ref != elem_ref.parent:
_lowerCAmelCase : Tuple = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case ):
# helper function for union operation
if nodea.rank > nodea.rank:
_lowerCAmelCase : int = nodea
else:
_lowerCAmelCase : Optional[Any] = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case ):
# merge 2 disjoint sets
self.link(self.find_set(_snake_case ) , self.find_set(_snake_case ) )
class __A ( Generic[T] ):
'''simple docstring'''
def __init__( self ):
# connections: map from the node to the neighbouring nodes (with weights)
_lowerCAmelCase : dict[T, dict[T, int]] = {}
def SCREAMING_SNAKE_CASE__ ( self , _snake_case ):
# add a node ONLY if its not present in the graph
if node not in self.connections:
_lowerCAmelCase : Any = {}
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case ):
# add an edge with the given weight
self.add_node(_snake_case )
self.add_node(_snake_case )
_lowerCAmelCase : int = weight
_lowerCAmelCase : Optional[int] = weight
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : Optional[Any] = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda _snake_case : x[2] )
# creating the disjoint set
_lowerCAmelCase : Tuple = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(_snake_case )
# MST generation
_lowerCAmelCase : Optional[Any] = 0
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : int = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = edges[index]
index += 1
_lowerCAmelCase : Dict = disjoint_set.find_set(_snake_case )
_lowerCAmelCase : List[str] = disjoint_set.find_set(_snake_case )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(_snake_case , _snake_case , _snake_case )
disjoint_set.union(_snake_case , _snake_case )
return graph
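# Minimal usage sketch (my addition, not part of the original module): Kruskal
# on a weighted triangle keeps the two cheapest edges and drops edge (1, 3).
if __name__ == "__main__":
    example = GraphUndirectedWeighted[int]()
    example.add_edge(1, 2, 1)
    example.add_edge(2, 3, 2)
    example.add_edge(1, 3, 3)
    print(example.kruskal().connections)  # {1: {2: 1}, 2: {1: 1, 3: 2}, 3: {2: 2}}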
| 587 | 1 |
"""simple docstring"""
def gray_code(bit_count: int) -> list:
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
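# Worked example (my addition): the 3-bit reflected Gray code -- consecutive
# entries differ in exactly one bit.
if __name__ == "__main__":
    print(gray_code(3))  # [0, 1, 3, 2, 6, 7, 5, 4]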
| 560 |
'''simple docstring'''
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol

    return largest
if __name__ == "__main__":
print(F"""{solution() = }""")
| 676 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_resnet'] = [
'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'ResNetForImageClassification',
'ResNetModel',
'ResNetPreTrainedModel',
'ResNetBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_resnet'] = [
'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFResNetForImageClassification',
'TFResNetModel',
'TFResNetPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_resnet'] = [
'FlaxResNetForImageClassification',
'FlaxResNetModel',
'FlaxResNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 717 | '''simple docstring'''
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    def __init__(self) -> None:
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverflowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    def __init__(self) -> None:
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue():
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue():
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 438 | 0 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    # linear fallback search; returns -1 if the element is not found
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    # iterative method of the ternary search algorithm
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    # recursive method of the ternary search algorithm
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
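# Non-interactive example (my addition): in the sorted list [0, 2, 4, 6, 8, 10]
# the range is already below `precision`, so both variants fall back to
# lin_search and return index 3 for the target 6:
#     ite_ternary_search([0, 2, 4, 6, 8, 10], 6)        -> 3
#     rec_ternary_search(0, 5, [0, 2, 4, 6, 8, 10], 6)  -> 3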
| 9 | def validate_initial_digits(credit_card_number: str) -> bool:
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True
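# Worked Luhn example (my addition): for "8572" the digits at positions
# len-2, len-4, ... are doubled: 7 -> 14 -> 1 + 4 = 5 and 8 -> 16 -> 1 + 6 = 7,
# so the checksum is 5 + 7 + 5 + 2 = 19, which is not divisible by 10 and the
# number fails luhn_validation.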
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
| 166 | 0 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by the chosen validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
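# Hedged usage sketch (my addition; the training script that actually wires
# these up is not shown here, so the Trainer arguments are illustrative):
#
#     trainer = pl.Trainer(
#         callbacks=[
#             get_checkpoint_callback("output_dir", "rouge2"),
#             get_early_stopping_callback("rouge2", patience=3),
#             Seq2SeqLoggingCallback(),
#         ]
#     )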
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 515 |
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
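# Worked check (my addition): with x = [0, 1, 2, 3], y-values [1, 2, 4, 8]
# (samples of 2**x) and value = 1.5, we get u = (1.5 - 0) / (1 - 0) = 1.5 and a
# forward-difference table whose first row is [1, 1, 1, 1], so the interpolated
# value is 1 + 1.5*1 + (1.5*0.5/2!)*1 + (1.5*0.5*(-0.5)/3!)*1 = 2.8125, close
# to the true 2**1.5 ≈ 2.828.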
| 515 | 1 |
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """Return an activation module for the given name."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
| 79 |
'''simple docstring'''
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 26 | 0 |
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy APX-algorithm for finding a minimum vertex cover."""
    # queue used to store nodes and their rank
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v hasn't any adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1

        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f'Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}')
| 705 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[str]:
'''simple docstring'''
torch.manual_seed(0)
lowerCamelCase__: Optional[int] =UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
lowerCamelCase__: Union[str, Any] =DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
torch.manual_seed(0)
lowerCamelCase__: List[str] =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0)
lowerCamelCase__: Optional[int] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=512 , )
lowerCamelCase__: Optional[Any] =CLIPTextModel(UpperCAmelCase_)
lowerCamelCase__: Dict =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
lowerCamelCase__: Tuple ={
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any]=0) ->Union[str, Any]:
'''simple docstring'''
if str(UpperCAmelCase_).startswith("mps"):
lowerCamelCase__: Optional[int] =torch.manual_seed(UpperCAmelCase_)
else:
lowerCamelCase__: Any =torch.Generator(device=UpperCAmelCase_).manual_seed(UpperCAmelCase_)
lowerCamelCase__: List[Any] ={
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: List[str] ="cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__: Optional[Any] =self.get_dummy_components()
lowerCamelCase__: List[Any] =TextToVideoSDPipeline(**UpperCAmelCase_)
lowerCamelCase__: int =sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
lowerCamelCase__: Any =self.get_dummy_inputs(UpperCAmelCase_)
lowerCamelCase__: List[Any] ="np"
lowerCamelCase__: Optional[int] =sd_pipe(**UpperCAmelCase_).frames
lowerCamelCase__: Dict =frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
lowerCamelCase__: Optional[int] =np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->str:
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=UpperCAmelCase_ , expected_max_diff=3E-3)
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->List[Any]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=UpperCAmelCase_ , expected_max_diff=1E-2)
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->str:
'''simple docstring'''
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[int]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Any:
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->int:
'''simple docstring'''
lowerCamelCase__: Dict =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy")
lowerCamelCase__: Optional[int] =TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
lowerCamelCase__: str =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
lowerCamelCase__: Tuple =pipe.to("cuda")
lowerCamelCase__: List[Any] ="Spiderman is surfing"
lowerCamelCase__: Dict =torch.Generator(device="cpu").manual_seed(0)
lowerCamelCase__: Optional[int] =pipe(UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=25 , output_type="pt").frames
lowerCamelCase__: str =video_frames.cpu().numpy()
assert np.abs(expected_video - video).mean() < 5E-2
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy")
lowerCamelCase__: List[str] =TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
lowerCamelCase__: Any =pipe.to("cuda")
lowerCamelCase__: Dict ="Spiderman is surfing"
lowerCamelCase__: Dict =torch.Generator(device="cpu").manual_seed(0)
lowerCamelCase__: Optional[int] =pipe(UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type="pt").frames
lowerCamelCase__: List[Any] =video_frames.cpu().numpy()
assert np.abs(expected_video - video).mean() < 5E-2
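# Hedged sketch: minimal inference path exercised by the slow tests above,
# using only the checkpoint, scheduler swap, and call arguments that appear
# in this snippet; a CUDA device is assumed.
import torch
from diffusers import DPMSolverMultistepScheduler, TextToVideoSDPipeline
pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")
generator = torch.Generator(device="cpu").manual_seed(0)
video_frames = pipe(
    "Spiderman is surfing",
    generator=generator,
    num_inference_steps=25,
    output_type="pt",
).frames  # tensor of generated video frames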
| 437 | 0 |
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
lowerCamelCase = 2
class lowercase__ :
'''simple docstring'''
def __init__( self : Union[str, Any] , *, # begin keyword-only arguments
_UpperCAmelCase : Optional[Any]="<s>" , _UpperCAmelCase : List[Any]="<pad>" , _UpperCAmelCase : List[Any]="</s>" , _UpperCAmelCase : Union[str, Any]="<unk>" , _UpperCAmelCase : Union[str, Any]=None , ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = bos, unk, pad, eos
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = {}
UpperCAmelCase_ = self.add_symbol(_UpperCAmelCase )
UpperCAmelCase_ = self.add_symbol(_UpperCAmelCase )
UpperCAmelCase_ = self.add_symbol(_UpperCAmelCase )
UpperCAmelCase_ = self.add_symbol(_UpperCAmelCase )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(_UpperCAmelCase )
UpperCAmelCase_ = len(self.symbols )
def __eq__( self : int , _UpperCAmelCase : int ) -> Optional[int]:
'''simple docstring'''
return self.indices == other.indices
def __getitem__( self : Optional[int] , _UpperCAmelCase : str ) -> Tuple:
'''simple docstring'''
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : Tuple ) -> str:
'''simple docstring'''
return len(self.symbols )
def __contains__( self : List[str] , _UpperCAmelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return sym in self.indices
@classmethod
def lowercase__ ( cls : Tuple , _UpperCAmelCase : int ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = cls()
d.add_from_file(_UpperCAmelCase )
return d
def lowercase__ ( self : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict=1 , _UpperCAmelCase : List[str]=False ) -> List[str]:
'''simple docstring'''
if word in self.indices and not overwrite:
UpperCAmelCase_ = self.indices[word]
UpperCAmelCase_ = self.count[idx] + n
return idx
else:
UpperCAmelCase_ = len(self.symbols )
UpperCAmelCase_ = idx
self.symbols.append(_UpperCAmelCase )
self.count.append(_UpperCAmelCase )
return idx
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return 0
def lowercase__ ( self : Optional[Any] , _UpperCAmelCase : Dict ) -> str:
'''simple docstring'''
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
try:
with open(_UpperCAmelCase , "r" , encoding="utf-8" ) as fd:
self.add_from_file(_UpperCAmelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(_UpperCAmelCase ) )
return
UpperCAmelCase_ = f.readlines()
UpperCAmelCase_ = self._load_meta(_UpperCAmelCase )
for line in lines[indices_start_line:]:
try:
UpperCAmelCase_ , UpperCAmelCase_ = line.rstrip().rsplit(" " , 1 )
if field == "#fairseq:overwrite":
UpperCAmelCase_ = True
UpperCAmelCase_ , UpperCAmelCase_ = line.rsplit(" " , 1 )
else:
UpperCAmelCase_ = False
UpperCAmelCase_ = int(_UpperCAmelCase )
UpperCAmelCase_ = line
if word in self and not overwrite:
raise RuntimeError(
"Duplicate word found when loading Dictionary: '{}'. "
"Duplicate words can overwrite earlier ones by adding the "
"#fairseq:overwrite flag at the end of the corresponding row "
"in the dictionary file. If using the Camembert model, please "
"download an updated copy of the model file.".format(_UpperCAmelCase ) )
self.add_symbol(_UpperCAmelCase , n=_UpperCAmelCase , overwrite=_UpperCAmelCase )
except ValueError:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'" )
def a__ ( lowerCAmelCase__ ):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    UpperCAmelCase_ = dict((re.sub(r"@@$" , "" , k ), v) if k.endswith("@@" ) else (re.sub(r"$" , "</w>" , k ), v) for k, v in d.items() )
UpperCAmelCase_ = "<s> <pad> </s> <unk>".split()
# restore the special tokens
for k in keep_keys:
del da[f"""{k}</w>"""]
UpperCAmelCase_ = d[k] # restore
return da
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
# prep
if not os.path.exists(lowerCAmelCase__ ):
raise ValueError(f"""path {biogpt_checkpoint_path} does not exist!""" )
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
print(f"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
UpperCAmelCase_ = os.path.join(lowerCAmelCase__ , "checkpoint.pt" )
if not os.path.isfile(lowerCAmelCase__ ):
raise ValueError(f"""path to the file {checkpoint_file} does not exist!""" )
UpperCAmelCase_ = torch.load(lowerCAmelCase__ , map_location="cpu" )
UpperCAmelCase_ = chkpt["cfg"]["model"]
# dicts
UpperCAmelCase_ = os.path.join(lowerCAmelCase__ , "dict.txt" )
if not os.path.isfile(lowerCAmelCase__ ):
raise ValueError(f"""path to the file {dict_file} does not exist!""" )
UpperCAmelCase_ = Dictionary.load(lowerCAmelCase__ )
UpperCAmelCase_ = rewrite_dict_keys(src_dict.indices )
UpperCAmelCase_ = len(lowerCAmelCase__ )
UpperCAmelCase_ = os.path.join(lowerCAmelCase__ , VOCAB_FILES_NAMES["vocab_file"] )
print(f"""Generating {src_vocab_file} of {src_vocab_size} records""" )
with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ , indent=lowerCAmelCase__ ) )
# merges_file (bpecodes)
UpperCAmelCase_ = os.path.join(lowerCAmelCase__ , "bpecodes" )
if not os.path.isfile(lowerCAmelCase__ ):
raise ValueError(f"""path to the file {bpecodes_file} does not exist!""" )
UpperCAmelCase_ = os.path.join(lowerCAmelCase__ , VOCAB_FILES_NAMES["merges_file"] )
shutil.copyfile(lowerCAmelCase__ , lowerCAmelCase__ )
# model config
UpperCAmelCase_ = os.path.join(lowerCAmelCase__ , "config.json" )
UpperCAmelCase_ = {
"activation_dropout": args["activation_dropout"],
"architectures": ["BioGptForCausalLM"],
"attention_probs_dropout_prob": args["attention_dropout"],
"bos_token_id": 0,
"eos_token_id": 2,
"hidden_act": args["activation_fn"],
"hidden_dropout_prob": args["dropout"],
"hidden_size": args["decoder_embed_dim"],
"initializer_range": 0.02,
"intermediate_size": args["decoder_ffn_embed_dim"],
"layer_norm_eps": 1e-12,
"layerdrop": args["decoder_layerdrop"],
"max_position_embeddings": args["max_target_positions"],
"model_type": "biogpt",
"num_attention_heads": args["decoder_attention_heads"],
"num_hidden_layers": args["decoder_layers"],
"pad_token_id": 1,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_decoder_input_output_embed"],
"vocab_size": src_vocab_size,
}
# good hparam defaults to start with
print(f"""Generating {biogpt_model_config_file}""" )
with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ , indent=lowerCAmelCase__ ) )
# tokenizer config
UpperCAmelCase_ = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ = {
"bos_token": "<s>",
"eos_token": "</s>",
"model_max_length": 1024,
"pad_token": "<pad>",
"special_tokens_map_file": None,
"tokenizer_class": "BioGptTokenizer",
"unk_token": "<unk>",
}
print(f"""Generating {biogpt_tokenizer_config_file}""" )
with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ , indent=lowerCAmelCase__ ) )
# model
UpperCAmelCase_ = chkpt["model"]
# remove unneeded keys
UpperCAmelCase_ = [
"decoder.version",
]
for k in ignore_keys:
model_state_dict.pop(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith("output_projection.weight" ):
UpperCAmelCase_ = model_state_dict.pop(lowerCAmelCase__ )
else:
UpperCAmelCase_ = model_state_dict.pop(lowerCAmelCase__ )
UpperCAmelCase_ = BioGptConfig.from_pretrained(lowerCAmelCase__ )
UpperCAmelCase_ = BioGptForCausalLM(lowerCAmelCase__ )
# check that it loads ok
model_new.load_state_dict(lowerCAmelCase__ )
# save
UpperCAmelCase_ = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
print(f"""Generating {pytorch_weights_dump_path}""" )
torch.save(lowerCAmelCase__ , lowerCAmelCase__ )
print("Conversion is done!" )
if __name__ == "__main__":
 lowerCamelCase = parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
 lowerCamelCase = args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
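# Hedged sketch: the key-rewriting step above is the subtle part of the
# conversion. fairseq BPE vocabularies mark word-internal pieces with a
# trailing "@@", while the HF tokenizer expects word-final pieces to carry
# "</w>". Self-contained demo; the toy vocabulary is illustrative only.
import re
def rewrite_dict_keys_demo(d):
    # Drop the "@@" continuation marker; append "</w>" to word-final pieces.
    rewritten = dict(
        (re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v)
        for k, v in d.items()
    )
    # The special tokens must keep their original spelling, so restore them.
    for k in "<s> <pad> </s> <unk>".split():
        del rewritten[f"{k}</w>"]
        rewritten[k] = d[k]
    return rewritten
toy = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 4, "tt@@": 5, "er": 6}
assert rewrite_dict_keys_demo(toy) == {
    "le": 4, "tt": 5, "er</w>": 6, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3,
}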
| 82 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = "huggingface/label-files"
UpperCAmelCase_ = "imagenet-1k-id2label.json"
UpperCAmelCase_ = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase_ = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ = "std_conv" if "bit" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
UpperCAmelCase_ = BitConfig(
conv_layer=lowerCAmelCase__ , num_labels=1000 , idalabel=lowerCAmelCase__ , labelaid=lowerCAmelCase__ , )
return config
def a__ ( lowerCAmelCase__ ):
if "stem.conv" in name:
UpperCAmelCase_ = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
UpperCAmelCase_ = name.replace("blocks" , "layers" )
if "head.fc" in name:
UpperCAmelCase_ = name.replace("head.fc" , "classifier.1" )
if name.startswith("norm" ):
UpperCAmelCase_ = "bit." + name
if "bit" not in name and "classifier" not in name:
UpperCAmelCase_ = "bit.encoder." + name
return name
def a__ ( ):
UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ):
UpperCAmelCase_ = get_config(lowerCAmelCase__ )
# load original model from timm
UpperCAmelCase_ = create_model(lowerCAmelCase__ , pretrained=lowerCAmelCase__ )
timm_model.eval()
# load state_dict of original model
UpperCAmelCase_ = timm_model.state_dict()
for key in state_dict.copy().keys():
UpperCAmelCase_ = state_dict.pop(lowerCAmelCase__ )
UpperCAmelCase_ = val.squeeze() if "head" in key else val
# load HuggingFace model
UpperCAmelCase_ = BitForImageClassification(lowerCAmelCase__ )
model.eval()
model.load_state_dict(lowerCAmelCase__ )
# create image processor
UpperCAmelCase_ = create_transform(**resolve_data_config({} , model=lowerCAmelCase__ ) )
UpperCAmelCase_ = transform.transforms
UpperCAmelCase_ = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
UpperCAmelCase_ = BitImageProcessor(
do_resize=lowerCAmelCase__ , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowerCAmelCase__ , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=lowerCAmelCase__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = transform(lowerCAmelCase__ ).unsqueeze(0 )
UpperCAmelCase_ = processor(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ )
# verify logits
with torch.no_grad():
UpperCAmelCase_ = model(lowerCAmelCase__ )
UpperCAmelCase_ = outputs.logits
print("Logits:" , logits[0, :3] )
print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] )
UpperCAmelCase_ = timm_model(lowerCAmelCase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCAmelCase__ , outputs.logits , atol=1e-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print(f"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(f"""ybelkada/{model_name}""" )
processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
 lowerCamelCase = parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
 lowerCamelCase = args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
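# Hedged sketch: running the converted checkpoint like any other transformers
# image classifier. Assumes the default resnetv2_50x1_bitm model was pushed
# under the ybelkada/ namespace, as the push_to_hub calls above suggest.
import requests
import torch
from PIL import Image
from transformers import BitForImageClassification, BitImageProcessor
repo = "ybelkada/resnetv2_50x1_bitm"  # assumed hub id
processor = BitImageProcessor.from_pretrained(repo)
model = BitForImageClassification.from_pretrained(repo)
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])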
| 82 | 1 |
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : Union[str, Any] = int(__UpperCamelCase )
assert noofclusters < len(__UpperCamelCase )
# Find out the dimensionality
snake_case_ : Optional[int] = len(vectors[0] )
# Will help select random centroids from among the available vectors
snake_case_ : int = list(range(len(__UpperCamelCase ) ) )
shuffle(__UpperCamelCase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
snake_case_ : str = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
snake_case_ : Dict = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
snake_case_ : str = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(__UpperCamelCase )
]
##These nodes will assign the centroid Variables the appropriate
##values
snake_case_ : Dict = tf.placeholder("""float64""" , [dim] )
snake_case_ : List[str] = []
for centroid in centroids:
cent_assigns.append(tf.assign(__UpperCamelCase , __UpperCamelCase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
snake_case_ : Any = [tf.Variable(0 ) for i in range(len(__UpperCamelCase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
snake_case_ : Tuple = tf.placeholder("""int32""" )
snake_case_ : Optional[int] = []
for assignment in assignments:
cluster_assigns.append(tf.assign(__UpperCamelCase , __UpperCamelCase ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
snake_case_ : str = tf.placeholder("""float""" , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
snake_case_ : List[str] = tf.reduce_mean(__UpperCamelCase , 0 )
##Node for computing Euclidean distances
# Placeholders for input
snake_case_ : Union[str, Any] = tf.placeholder("""float""" , [dim] )
snake_case_ : List[str] = tf.placeholder("""float""" , [dim] )
snake_case_ : int = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(__UpperCamelCase , __UpperCamelCase ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
snake_case_ : List[str] = tf.placeholder("""float""" , [noofclusters] )
snake_case_ : Optional[int] = tf.argmin(__UpperCamelCase , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
snake_case_ : List[Any] = tf.initialize_all_variables()
# Initialize all variables
sess.run(__UpperCamelCase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
snake_case_ : Optional[Any] = 1_0_0
for _ in range(__UpperCamelCase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(__UpperCamelCase ) ):
snake_case_ : Any = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
snake_case_ : str = [
sess.run(__UpperCamelCase , feed_dict={va: vect, va: sess.run(__UpperCamelCase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
snake_case_ : Any = sess.run(
__UpperCamelCase , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(__UpperCamelCase ):
# Collect all the vectors assigned to this cluster
snake_case_ : List[str] = [
vectors[i]
for i in range(len(__UpperCamelCase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
snake_case_ : Dict = sess.run(
__UpperCamelCase , feed_dict={mean_input: array(__UpperCamelCase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
snake_case_ : Optional[Any] = sess.run(__UpperCamelCase )
snake_case_ : Any = sess.run(__UpperCamelCase )
return centroids, assignments
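# Hedged sketch: toy invocation of the clustering routine above (conceptually
# TFKMeansCluster(vectors, noofclusters); the name is an assumption, since the
# snippet's identifiers are mangled). Note the implementation relies on the
# TensorFlow 1.x graph API (tf.Session, tf.placeholder, tf.sub), so it will
# not run on TensorFlow 2 without tf.compat.v1 shims.
from numpy import array
vectors = [
    array([1.0, 1.0]),
    array([1.5, 2.0]),
    array([8.0, 8.0]),
    array([8.5, 9.0]),
]
centroids, assignments = TFKMeansCluster(vectors, 2)  # assumed name, see above
print(centroids)    # two 2-D centroid locations, one near each blob
print(assignments)  # cluster index (0 or 1) for each input vector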
| 21 |
"""simple docstring"""
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION : List[str] = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
_DESCRIPTION : Optional[Any] = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
_KWARGS_DESCRIPTION : str = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Any ):
'''simple docstring'''
return float((preds == labels).mean() )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : str="binary" ):
'''simple docstring'''
snake_case_ : Optional[Any] = simple_accuracy(__UpperCamelCase , __UpperCamelCase )
snake_case_ : Dict = float(fa_score(y_true=__UpperCamelCase , y_pred=__UpperCamelCase , average=__UpperCamelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : List[Any] = {}
for id_pred, label in zip(__UpperCamelCase , __UpperCamelCase ):
snake_case_ : Optional[int] = F'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
snake_case_ : Union[str, Any] = id_pred["""prediction"""]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
snake_case_ : str = [(pred, label)]
snake_case_ , snake_case_ : List[str] = [], []
for question, preds_labels in question_map.items():
snake_case_ , snake_case_ : Optional[Any] = zip(*__UpperCamelCase )
snake_case_ : int = fa_score(y_true=__UpperCamelCase , y_pred=__UpperCamelCase , average="""macro""" )
fas.append(__UpperCamelCase )
snake_case_ : Dict = int(sum(pred == label for pred, label in preds_labels ) == len(__UpperCamelCase ) )
ems.append(__UpperCamelCase )
snake_case_ : Optional[int] = float(sum(__UpperCamelCase ) / len(__UpperCamelCase ) )
snake_case_ : Any = sum(__UpperCamelCase ) / len(__UpperCamelCase )
snake_case_ : int = float(fa_score(y_true=__UpperCamelCase , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> List[str]:
'''simple docstring'''
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_lowercase , _lowercase )}
elif self.config_name == "cb":
return acc_and_fa(_lowercase , _lowercase , fa_avg="""macro""" )
elif self.config_name == "record":
snake_case_ : Optional[Any] = [
{
"""qas""": [
{"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
for ref in references
]
}
]
snake_case_ : Dict = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
return evaluate_record(_lowercase , _lowercase )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_lowercase , _lowercase )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_lowercase , _lowercase )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 21 | 1 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
lowerCAmelCase__ : Optional[str] = field(
default=snake_case , metadata={
'help': (
'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
)
} , )
lowerCAmelCase__ : Optional[str] = field(
default=snake_case , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(snake_case )} , )
lowerCAmelCase__ : Optional[str] = field(
default=snake_case , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
lowerCAmelCase__ : Optional[str] = field(
default=snake_case , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCAmelCase__ : Optional[str] = field(
default=snake_case , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowerCAmelCase__ : Optional[str] = field(
default=snake_case , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCAmelCase__ : bool = field(
default=snake_case , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
lowerCAmelCase__ : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowerCAmelCase__ : bool = field(
default=snake_case , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def _UpperCAmelCase ( self: Any ) -> Dict:
'''simple docstring'''
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"--config_overrides can't be used in combination with --config_name or --model_name_or_path" )
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
lowerCAmelCase__ : Optional[str] = field(
default=snake_case , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
lowerCAmelCase__ : Optional[str] = field(
default=snake_case , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
lowerCAmelCase__ : Optional[str] = field(default=snake_case , metadata={'help': 'The input training data file (a text file).'} )
lowerCAmelCase__ : Optional[str] = field(
default=snake_case , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
lowerCAmelCase__ : Optional[str] = field(
default=snake_case , metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'} , )
lowerCAmelCase__ : Optional[str] = field(
default=snake_case , metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'} , )
lowerCAmelCase__ : bool = field(
default=snake_case , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase__ : Optional[int] = field(
default=5 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
lowerCAmelCase__ : Optional[int] = field(
default=snake_case , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated. Default to the max input length of the model.'
)
} , )
lowerCAmelCase__ : Optional[int] = field(
default=snake_case , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
lowerCAmelCase__ : float = field(
default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
lowerCAmelCase__ : bool = field(
default=snake_case , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
def _UpperCAmelCase ( self: int ) -> int:
'''simple docstring'''
if self.train_file is not None:
__UpperCAmelCase = self.train_file.split("." )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
__UpperCAmelCase = self.validation_file.split("." )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def __lowerCAmelCase ( A_ : int , A_ : Optional[int] ) -> Optional[Any]:
with open(A_ , "r" , encoding="utf-8" ) as f:
        __UpperCAmelCase = [json.loads(line ) for line in f.read().splitlines() if (len(line ) > 0 and not line.isspace())]
assert len(A_ ) == len(A_ )
__UpperCAmelCase = {c: dataset[c] for c in dataset.column_names}
__UpperCAmelCase = refs
return Dataset.from_dict(A_ )
def __lowerCAmelCase ( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
        F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , A_ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__UpperCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
__UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''train[:{data_args.validation_split_percentage}%]''' , )
__UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''train[{data_args.validation_split_percentage}%:]''' , )
else:
__UpperCAmelCase = {}
if data_args.train_file is not None:
__UpperCAmelCase = data_args.train_file
if data_args.validation_file is not None:
__UpperCAmelCase = data_args.validation_file
__UpperCAmelCase = data_args.train_file.split("." )[-1]
if extension == "txt":
__UpperCAmelCase = "text"
__UpperCAmelCase = load_dataset(A_ , data_files=A_ )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCAmelCase = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
__UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name , **A_ )
elif model_args.model_name_or_path:
__UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , **A_ )
else:
__UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
__UpperCAmelCase = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
__UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **A_ )
elif model_args.model_name_or_path:
__UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **A_ )
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name." )
if model_args.model_name_or_path:
__UpperCAmelCase = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=A_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch" )
__UpperCAmelCase = AutoModelForMaskedLM.from_config(A_ )
model.resize_token_embeddings(len(A_ ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
__UpperCAmelCase = datasets["train"].column_names
else:
__UpperCAmelCase = datasets["validation"].column_names
__UpperCAmelCase = "text" if "text" in column_names else column_names[0]
__UpperCAmelCase = "max_length" if data_args.pad_to_max_length else False
def tokenize_function(A_ : str ):
# Remove empty lines
        __UpperCAmelCase = [line for line in examples["text"] if len(line ) > 0 and not line.isspace()]
return tokenizer(examples["text"] , padding=A_ , truncation=A_ , max_length=data_args.max_seq_length )
__UpperCAmelCase = datasets.map(
A_ , batched=A_ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
__UpperCAmelCase = add_chinese_references(tokenized_datasets["train"] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
__UpperCAmelCase = add_chinese_references(
tokenized_datasets["validation"] , data_args.validation_ref_file )
    # If we have ref files, we need to keep the Trainer from removing them
__UpperCAmelCase = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
__UpperCAmelCase = False
# Data collator
# This one will take care of randomly masking the tokens.
__UpperCAmelCase = DataCollatorForWholeWordMask(tokenizer=A_ , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__UpperCAmelCase = Trainer(
model=A_ , args=A_ , train_dataset=tokenized_datasets["train"] if training_args.do_train else None , eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None , tokenizer=A_ , data_collator=A_ , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__UpperCAmelCase = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
__UpperCAmelCase = model_args.model_name_or_path
else:
__UpperCAmelCase = None
__UpperCAmelCase = trainer.train(resume_from_checkpoint=A_ )
trainer.save_model() # Saves the tokenizer too for easy upload
__UpperCAmelCase = os.path.join(training_args.output_dir , "train_results.txt" )
if trainer.is_world_process_zero():
with open(A_ , "w" ) as writer:
logger.info("***** Train results *****" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F''' {key} = {value}''' )
writer.write(F'''{key} = {value}\n''' )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# Evaluation
__UpperCAmelCase = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
__UpperCAmelCase = trainer.evaluate()
__UpperCAmelCase = math.exp(eval_output["eval_loss"] )
__UpperCAmelCase = perplexity
__UpperCAmelCase = os.path.join(training_args.output_dir , "eval_results_mlm_wwm.txt" )
if trainer.is_world_process_zero():
with open(A_ , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in sorted(results.items() ):
logger.info(F''' {key} = {value}''' )
writer.write(F'''{key} = {value}\n''' )
return results
def __lowerCAmelCase ( A_ : Dict ) -> Any:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
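# Hedged sketch: the distinguishing piece of this script is
# DataCollatorForWholeWordMask, which masks every sub-token of a word together
# rather than masking tokens independently. Isolated demo below; the
# bert-base-uncased tokenizer is an assumption, not something the script fixes.
from transformers import AutoTokenizer, DataCollatorForWholeWordMask
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # assumed checkpoint
collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=0.15)
# The collator takes a list of tokenized examples and returns masked input_ids
# plus MLM labels (-100 everywhere except the masked positions).
batch = collator([tokenizer("The quick brown fox jumps over the lazy dog")])
print(batch["input_ids"].shape, batch["labels"].shape)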
| 221 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class UpperCAmelCase__ ( snake_case ):
"""simple docstring"""
pass
def __lowerCAmelCase ( shards : List[str] ) -> List[Any]:
for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD ):
yield {"i": i, "shard": shard}
def __lowerCAmelCase ( ) -> List[str]:
__UpperCAmelCase = int(os.environ["RANK"] )
__UpperCAmelCase = int(os.environ["WORLD_SIZE"] )
    __UpperCAmelCase = parser = ArgumentParser()
parser.add_argument("--streaming" , type=A_ )
parser.add_argument("--local_rank" , type=A_ )
parser.add_argument("--num_workers" , type=A_ , default=0 )
    __UpperCAmelCase = args = parser.parse_args()
__UpperCAmelCase = args.streaming
__UpperCAmelCase = args.num_workers
__UpperCAmelCase = {"shards": [F'''shard_{shard_idx}''' for shard_idx in range(A_ )]}
__UpperCAmelCase = IterableDataset.from_generator(A_ , gen_kwargs=A_ )
if not streaming:
__UpperCAmelCase = Dataset.from_list(list(A_ ) )
__UpperCAmelCase = split_dataset_by_node(A_ , rank=A_ , world_size=A_ )
__UpperCAmelCase = torch.utils.data.DataLoader(A_ , num_workers=A_ )
__UpperCAmelCase = NUM_SHARDS * NUM_ITEMS_PER_SHARD
__UpperCAmelCase = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
__UpperCAmelCase = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(F'''local_size {local_size} != expected_local_size {expected_local_size}''' )
if __name__ == "__main__":
main()
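# Hedged sketch: single-process view of what the test above checks.
# split_dataset_by_node hands out contiguous shards of full_size // world_size
# rows, with ranks below the remainder receiving one extra row.
from datasets import Dataset
from datasets.distributed import split_dataset_by_node
ds = Dataset.from_dict({"i": list(range(10))})
shards = [split_dataset_by_node(ds, rank=r, world_size=3) for r in range(3)]
print([len(s) for s in shards])  # [4, 3, 3] -> rank 0 absorbs the remainder
assert sum(len(s) for s in shards) == len(ds)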
| 221 | 1 |
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def _UpperCamelCase ( UpperCamelCase ) -> Any:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = git.Repo(search_parent_directories=UpperCamelCase )
__UpperCAmelCase : Any = {
"repo_id": str(UpperCamelCase ),
"repo_sha": str(repo.head.object.hexsha ),
"repo_branch": str(repo.active_branch ),
}
with open(os.path.join(UpperCamelCase , "git_log.json" ) , "w" ) as f:
json.dump(UpperCamelCase , UpperCamelCase , indent=4 )
def _UpperCamelCase ( UpperCamelCase ) -> str:
"""simple docstring"""
if params.n_gpu <= 0:
__UpperCAmelCase : str = 0
__UpperCAmelCase : Dict = -1
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : List[str] = False
return
assert torch.cuda.is_available()
logger.info("Initializing GPUs" )
if params.n_gpu > 1:
assert params.local_rank != -1
__UpperCAmelCase : Optional[int] = int(os.environ["WORLD_SIZE"] )
__UpperCAmelCase : Union[str, Any] = int(os.environ["N_GPU_NODE"] )
__UpperCAmelCase : Optional[int] = int(os.environ["RANK"] )
# number of nodes / node ID
__UpperCAmelCase : int = params.world_size // params.n_gpu_per_node
__UpperCAmelCase : Optional[int] = params.global_rank // params.n_gpu_per_node
__UpperCAmelCase : Tuple = True
assert params.n_nodes == int(os.environ["N_NODES"] )
assert params.node_id == int(os.environ["NODE_RANK"] )
# local job (single GPU)
else:
assert params.local_rank == -1
__UpperCAmelCase : Union[str, Any] = 1
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : List[Any] = 0
__UpperCAmelCase : Tuple = 0
__UpperCAmelCase : Any = 1
__UpperCAmelCase : str = 1
__UpperCAmelCase : Dict = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
__UpperCAmelCase : Union[str, Any] = params.node_id == 0 and params.local_rank == 0
__UpperCAmelCase : Dict = params.n_nodes > 1
# summary
__UpperCAmelCase : Any = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes )
logger.info(PREFIX + "Node ID : %i" % params.node_id )
logger.info(PREFIX + "Local rank : %i" % params.local_rank )
logger.info(PREFIX + "World size : %i" % params.world_size )
logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node )
logger.info(PREFIX + "Master : %s" % str(params.is_master ) )
logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) )
logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) )
logger.info(PREFIX + "Hostname : %s" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("Initializing PyTorch distributed" )
torch.distributed.init_process_group(
init_method="env://" , backend="nccl" , )
def _UpperCamelCase ( UpperCamelCase ) -> Tuple:
"""simple docstring"""
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
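# Hedged sketch: driving the helpers above from a training entry point. The
# names git_log / init_gpu_params / set_seed follow the original distillation
# utilities and are assumptions here, since the mangled snippet reuses a
# single name for all three functions.
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--n_gpu", type=int, default=0)  # 0 selects the single-process path
parser.add_argument("--local_rank", type=int, default=-1)
parser.add_argument("--seed", type=int, default=56)
parser.add_argument("--dump_path", type=str, default="serialization_dir")
params = parser.parse_args([])
init_gpu_params(params)  # fills local_rank / is_master (and ranks when n_gpu > 0)
set_seed(params)         # seeds numpy and torch (and all CUDA devices when n_gpu > 0)
if params.is_master:
    git_log(params.dump_path)  # writes repo sha/branch to git_log.json in the dump dir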
| 487 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a__ ( PipelineTesterMixin , unittest.TestCase ):
lowercase_ = ConsistencyModelPipeline
lowercase_ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
lowercase_ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
lowercase_ = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
@property
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" , subfolder="test_unet" , )
return unet
@property
def a_ ( self : List[Any]):
"""simple docstring"""
__UpperCAmelCase : Dict = UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" , subfolder="test_unet_class_cond" , )
return unet
def a_ ( self : Any , UpperCamelCase_ : int=False):
"""simple docstring"""
if class_cond:
__UpperCAmelCase : List[Any] = self.dummy_cond_unet
else:
__UpperCAmelCase : Optional[int] = self.dummy_uncond_unet
# Default to CM multistep sampler
__UpperCAmelCase : List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__UpperCAmelCase : Optional[int] = {
"unet": unet,
"scheduler": scheduler,
}
return components
def a_ ( self : List[str] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any]=0):
"""simple docstring"""
if str(UpperCamelCase_).startswith("mps"):
__UpperCAmelCase : str = torch.manual_seed(UpperCamelCase_)
else:
__UpperCAmelCase : Optional[Any] = torch.Generator(device=UpperCamelCase_).manual_seed(UpperCamelCase_)
__UpperCAmelCase : List[Any] = {
"batch_size": 1,
"num_inference_steps": None,
"timesteps": [22, 0],
"generator": generator,
"output_type": "np",
}
return inputs
def a_ ( self : int):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : Union[str, Any] = self.get_dummy_components()
__UpperCAmelCase : str = ConsistencyModelPipeline(**UpperCamelCase_)
__UpperCAmelCase : Any = pipe.to(UpperCamelCase_)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__UpperCAmelCase : Any = self.get_dummy_inputs(UpperCamelCase_)
__UpperCAmelCase : str = pipe(**UpperCamelCase_).images
assert image.shape == (1, 32, 32, 3)
__UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
__UpperCAmelCase : List[Any] = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def a_ ( self : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : Optional[int] = self.get_dummy_components(class_cond=UpperCamelCase_)
__UpperCAmelCase : Optional[Any] = ConsistencyModelPipeline(**UpperCamelCase_)
__UpperCAmelCase : str = pipe.to(UpperCamelCase_)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__UpperCAmelCase : Tuple = self.get_dummy_inputs(UpperCamelCase_)
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : int = pipe(**UpperCamelCase_).images
assert image.shape == (1, 32, 32, 3)
__UpperCAmelCase : Any = image[0, -3:, -3:, -1]
__UpperCAmelCase : List[Any] = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : Optional[int] = self.get_dummy_components()
__UpperCAmelCase : Optional[Any] = ConsistencyModelPipeline(**UpperCamelCase_)
__UpperCAmelCase : Optional[Any] = pipe.to(UpperCamelCase_)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__UpperCAmelCase : List[str] = self.get_dummy_inputs(UpperCamelCase_)
__UpperCAmelCase : Dict = 1
__UpperCAmelCase : int = None
__UpperCAmelCase : List[Any] = pipe(**UpperCamelCase_).images
assert image.shape == (1, 32, 32, 3)
__UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
__UpperCAmelCase : Union[str, Any] = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def a_ ( self : List[str]):
"""simple docstring"""
__UpperCAmelCase : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : List[str] = self.get_dummy_components(class_cond=UpperCamelCase_)
__UpperCAmelCase : Tuple = ConsistencyModelPipeline(**UpperCamelCase_)
__UpperCAmelCase : int = pipe.to(UpperCamelCase_)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__UpperCAmelCase : Tuple = self.get_dummy_inputs(UpperCamelCase_)
__UpperCAmelCase : Optional[int] = 1
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : Tuple = 0
__UpperCAmelCase : Tuple = pipe(**UpperCamelCase_).images
assert image.shape == (1, 32, 32, 3)
__UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
__UpperCAmelCase : Dict = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def a_ ( self : Optional[int]):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : Union[str, Any] , UpperCamelCase_ : Optional[int]=0 , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : int="cpu" , UpperCamelCase_ : Any=torch.floataa , UpperCamelCase_ : List[str]=(1, 3, 64, 64)):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = torch.manual_seed(UpperCamelCase_)
__UpperCAmelCase : int = {
"num_inference_steps": None,
"timesteps": [22, 0],
"class_labels": 0,
"generator": generator,
"output_type": "np",
}
if get_fixed_latents:
__UpperCAmelCase : int = self.get_fixed_latents(seed=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ , shape=UpperCamelCase_)
__UpperCAmelCase : Optional[int] = latents
return inputs
def a_ ( self : Union[str, Any] , UpperCamelCase_ : int=0 , UpperCamelCase_ : Tuple="cpu" , UpperCamelCase_ : Tuple=torch.floataa , UpperCamelCase_ : Optional[Any]=(1, 3, 64, 64)):
"""simple docstring"""
if type(UpperCamelCase_) == str:
__UpperCAmelCase : Union[str, Any] = torch.device(UpperCamelCase_)
__UpperCAmelCase : Union[str, Any] = torch.Generator(device=UpperCamelCase_).manual_seed(UpperCamelCase_)
__UpperCAmelCase : Optional[int] = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_)
return latents
def a_ ( self : List[Any]):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2")
__UpperCAmelCase : Dict = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__UpperCAmelCase : Dict = ConsistencyModelPipeline(unet=UpperCamelCase_ , scheduler=UpperCamelCase_)
pipe.to(torch_device=UpperCamelCase_)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__UpperCAmelCase : Dict = self.get_inputs()
__UpperCAmelCase : List[str] = pipe(**UpperCamelCase_).images
assert image.shape == (1, 64, 64, 3)
__UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
__UpperCAmelCase : Union[str, Any] = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : List[str] = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2")
__UpperCAmelCase : Optional[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__UpperCAmelCase : Any = ConsistencyModelPipeline(unet=UpperCamelCase_ , scheduler=UpperCamelCase_)
pipe.to(torch_device=UpperCamelCase_)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__UpperCAmelCase : int = self.get_inputs()
__UpperCAmelCase : str = 1
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : Tuple = pipe(**UpperCamelCase_).images
assert image.shape == (1, 64, 64, 3)
__UpperCAmelCase : str = image[0, -3:, -3:, -1]
__UpperCAmelCase : Union[str, Any] = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
@require_torch_a
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : int = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2")
__UpperCAmelCase : Optional[int] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__UpperCAmelCase : int = ConsistencyModelPipeline(unet=UpperCamelCase_ , scheduler=UpperCamelCase_)
pipe.to(torch_device=UpperCamelCase_ , torch_dtype=torch.floataa)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__UpperCAmelCase : Tuple = self.get_inputs(get_fixed_latents=UpperCamelCase_ , device=UpperCamelCase_)
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=UpperCamelCase_ , enable_math=UpperCamelCase_ , enable_mem_efficient=UpperCamelCase_):
__UpperCAmelCase : List[str] = pipe(**UpperCamelCase_).images
assert image.shape == (1, 64, 64, 3)
__UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
__UpperCAmelCase : Optional[int] = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@require_torch_a
def a_ ( self : str):
"""simple docstring"""
__UpperCAmelCase : Tuple = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2")
__UpperCAmelCase : Union[str, Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__UpperCAmelCase : List[Any] = ConsistencyModelPipeline(unet=UpperCamelCase_ , scheduler=UpperCamelCase_)
pipe.to(torch_device=UpperCamelCase_ , torch_dtype=torch.floataa)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__UpperCAmelCase : Optional[int] = self.get_inputs(get_fixed_latents=UpperCamelCase_ , device=UpperCamelCase_)
__UpperCAmelCase : List[str] = 1
__UpperCAmelCase : Any = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=UpperCamelCase_ , enable_math=UpperCamelCase_ , enable_mem_efficient=UpperCamelCase_):
__UpperCAmelCase : List[str] = pipe(**UpperCamelCase_).images
assert image.shape == (1, 64, 64, 3)
__UpperCAmelCase : int = image[0, -3:, -3:, -1]
__UpperCAmelCase : List[str] = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
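# A minimal end-to-end sketch of the pipeline exercised by the tests above (hedged: the
# checkpoint id and scheduler settings are copied from the slow tests below; a CUDA
# device is assumed, not required by the pipeline itself).
if __name__ == "__main__":
    import torch
    from diffusers import CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNet2DModel

    unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
    scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
    pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler).to("cuda")
    # Onestep sampling, matching the num_inference_steps=1 tests above.
    image = pipe(num_inference_steps=1, class_labels=0, generator=torch.manual_seed(0)).images[0]
    image.save("consistency_sample.png")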
| 487 | 1 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2,
'facebook/dpr-ctx_encoder-multiset-base': 5_1_2,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 5_1_2,
'facebook/dpr-question_encoder-multiset-base': 5_1_2,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 5_1_2,
'facebook/dpr-reader-multiset-base': 5_1_2,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
CUSTOM_DPR_READER_DOCSTRING = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(self , questions , titles = None , texts = None , padding = False , truncation = False , max_length = None , return_tensors = None , return_attention_mask = None , **kwargs , ) -> BatchEncoding:
        '''simple docstring'''
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str) else [titles]
        texts = texts if not isinstance(texts , str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions , str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"""There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts.""")
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False)['''input_ids''']
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False)['''input_ids''']
        encoded_inputs = {
            '''input_ids''': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors)
    def decode_best_spans(self , reader_input , reader_output , num_spans = 1_6 , max_answer_length = 6_4 , num_spans_per_passage = 4 , ) -> List[DPRSpanPrediction]:
        '''simple docstring'''
        input_ids = reader_input['''input_ids''']
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages) , reverse=True , key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1]) , ))
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(self , start_logits , end_logits , max_answer_length , top_spans , ):
        '''simple docstring'''
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores , key=lambda x: x[1] , reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"""Span is too long: {length} > {max_answer_length}""")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['''input_ids''', '''attention_mask''']
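# Hedged usage example for the reader tokenizer defined above, following the documented
# DPR pattern; the checkpoint name comes from the pretrained maps at the top of the file.
if __name__ == "__main__":
    from transformers import DPRReader

    tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
    model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
    encoded_inputs = tokenizer(
        questions=["What is love?"],
        titles=["Haddaway"],
        texts=["'What Is Love' is a song recorded by the artist Haddaway"],
        return_tensors="pt",
    )
    outputs = model(**encoded_inputs)
    predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
    print(predicted_spans[0].text)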
| 132 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_focalnet'] = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
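# The _LazyModule indirection above keeps `import` cheap: the torch-backed submodule is
# only loaded on first attribute access. A simplified sketch of the idea (the real
# implementation in transformers.utils also handles module specs, extras and import errors):
import importlib
import types

class _MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {cls: mod for mod, classes in import_structure.items() for cls in classes}

    def __getattr__(self, item):
        if item not in self._class_to_module:
            raise AttributeError(item)
        # Import the submodule that actually defines `item`, then hand the attribute back.
        module = importlib.import_module("." + self._class_to_module[item], self.__name__)
        return getattr(module, item)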
| 132 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError('Depth cannot be less than 0')
    if len(scores) == 0:
        raise ValueError('Scores cannot be empty')
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height), minimax(depth + 1, node_index * 2 + 1, False, scores, height), )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height), minimax(depth + 1, node_index * 2 + 1, True, scores, height), )
def main() -> None:
    scores = [9_0, 2_3, 6, 3_3, 2_1, 6_5, 1_2_3, 3_4_4_2_3]
    height = math.log(len(scores), 2)
    print('Optimal value : ', end='')
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
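# Worked trace for the sample tree above (8 leaves, height = 3, maximiser moves first):
#   depth 3 (leaves): 90 23 | 6 33 | 21 65 | 123 34423
#   depth 2 (max)   :   90  |  33  |  65  |   34423
#   depth 1 (min)   :       33     |       65
#   depth 0 (max)   :              65   <- printed as "Optimal value : 65"
# i.e. minimax(0, 0, True, [90, 23, 6, 33, 21, 65, 123, 34423], 3) == 65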
| 709 |
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : List[str] = {
"snap-research/efficientformer-l1-300": (
"https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = "efficientformer"
def __init__( self , __UpperCAmelCase = [3, 2, 6, 4] , __UpperCAmelCase = [48, 96, 224, 448] , __UpperCAmelCase = [True, True, True, True] , __UpperCAmelCase = 448 , __UpperCAmelCase = 32 , __UpperCAmelCase = 4 , __UpperCAmelCase = 7 , __UpperCAmelCase = 5 , __UpperCAmelCase = 8 , __UpperCAmelCase = 4 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = 16 , __UpperCAmelCase = 3 , __UpperCAmelCase = 3 , __UpperCAmelCase = 3 , __UpperCAmelCase = 2 , __UpperCAmelCase = 1 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = 1 , __UpperCAmelCase = True , __UpperCAmelCase = True , __UpperCAmelCase = 1E-5 , __UpperCAmelCase = "gelu" , __UpperCAmelCase = 0.0_2 , __UpperCAmelCase = 1E-12 , __UpperCAmelCase = 224 , __UpperCAmelCase = 1E-05 , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = hidden_sizes
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = patch_size
__UpperCamelCase = num_channels
__UpperCamelCase = depths
__UpperCamelCase = mlp_expansion_ratio
__UpperCamelCase = downsamples
__UpperCamelCase = dim
__UpperCamelCase = key_dim
__UpperCamelCase = attention_ratio
__UpperCamelCase = resolution
__UpperCamelCase = pool_size
__UpperCamelCase = downsample_patch_size
__UpperCamelCase = downsample_stride
__UpperCamelCase = downsample_pad
__UpperCamelCase = drop_path_rate
__UpperCamelCase = num_metaad_blocks
__UpperCamelCase = distillation
__UpperCamelCase = use_layer_scale
__UpperCamelCase = layer_scale_init_value
__UpperCamelCase = image_size
__UpperCamelCase = batch_norm_eps
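# Hedged usage sketch: upstream this class is transformers' EfficientFormerConfig, so a
# non-default variant can be built by overriding the stage layout, e.g.:
#   config = EfficientFormerConfig(depths=[4, 4, 12, 6], hidden_sizes=[64, 128, 320, 512])
#   model = EfficientFormerModel(config)   # model class assumed from the same upstream module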
| 293 | 0 |
def solution(power: int = 1000) -> int:
    """simple docstring"""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num
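# A compact alternative with the same semantics as the function above (a sketch, not part
# of the original solution):
def solution_compact(power: int = 1000) -> int:
    return sum(int(digit) for digit in str(2**power))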
if __name__ == "__main__":
    power = int(input('Enter the power of 2: ').strip())
    print('2 ^ ', power, ' = ', 2**power)
    result = solution(power)
    print('Sum of the digits is: ', result)
| 86 |
"""simple docstring"""
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 134 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
CoinsDistribResult = namedtuple('''CoinsDistribResult''', '''moves excess''')
def distribute_coins(root: TreeNode | None) -> int:
"""simple docstring"""
if root is None:
return 0
# Validation
    def count_nodes(node: TreeNode | None) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node: TreeNode | None) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root) != count_coins(root):
raise ValueError('The nodes number should be same as the number of coins' )
# Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(distrib_moves, distrib_excess)
    return get_distrib(root)[0]
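# Concrete check (follows from the recurrence above): a root holding 3 coins with two
# empty leaves needs one move per leaf.
#
#     >>> distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0)))
#     2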
if __name__ == "__main__":
import doctest
doctest.testmod()
| 61 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_timm_backbone''': ['''TimmBackboneConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_timm_backbone'''] = ['''TimmBackbone''']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import numpy as np
class Cell:
"""simple docstring"""
def __init__( self : str ):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0
    def __eq__(self, cell):
        return self.position == cell.position
    def showcell(self):
print(self.position )
class Gridworld:
"""simple docstring"""
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]
    def show(self):
print(self.w )
    def get_neigbours(self, cell):
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    """simple docstring"""
    _open = []
    _closed = []
    _open.append(start)
    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
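# Note on the heuristic above: it is the *squared* Euclidean distance, which can
# overestimate the true remaining cost on an 8-connected grid with unit step cost, so it
# is not admissible; the search still returns a path here, but optimality is not
# guaranteed. (Observation about this implementation, not a behaviour change.)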
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
| 86 |
"""simple docstring"""
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __a ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = LxmertTokenizer
__UpperCamelCase : Optional[Any] = LxmertTokenizerFast
__UpperCamelCase : str = True
__UpperCamelCase : Any = True
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
super().setUp()
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def SCREAMING_SNAKE_CASE_ ( self , snake_case ):
"""simple docstring"""
lowerCAmelCase__ : List[Any] = "UNwant\u00E9d,running"
lowerCAmelCase__ : Union[str, Any] = "unwanted, running"
return input_text, output_text
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize("UNwant\u00E9d,running" )
        self.assertListEqual(tokens , ["un", "##want", "##ed", ",", "runn", "##ing"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
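    # What the assertions above exercise in one place: with the toy vocab from setUp
    # ("un" -> 7, "##want" -> 4, "##ed" -> 5, "," -> 10, "runn" -> 8, "##ing" -> 9),
    #   tokenizer.tokenize("UNwant\u00E9d,running") == ["un", "##want", "##ed", ",", "runn", "##ing"]
    #   tokenizer.convert_tokens_to_ids(...)        == [7, 4, 5, 10, 8, 9]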
| 453 | 0 |
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """simple docstring"""
    tensors_to_transpose = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
    var_map = (
('''layer.''', '''layer_'''),
('''word_embeddings.weight''', '''word_embeddings'''),
('''position_embeddings.weight''', '''position_embeddings'''),
('''token_type_embeddings.weight''', '''token_type_embeddings'''),
('''.''', '''/'''),
('''LayerNorm/weight''', '''LayerNorm/gamma'''),
('''LayerNorm/bias''', '''LayerNorm/beta'''),
('''weight''', '''kernel'''),
)
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()
    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt , repl)
        return f"""bert/{name}"""
    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor , name=tf_name , session=session)
            tf.keras.backend.set_value(tf_var , torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"""Successfully created {tf_name}: {np.allclose(tf_weight , torch_tensor)}""")
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session , os.path.join(ckpt_dir , model_name.replace('''-''' , '''_''') + '''.ckpt'''))
def main(raw_args=None):
"""simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument('''--model_name''' , type=str , required=True , help='''model name e.g. bert-base-uncased''')
    parser.add_argument(
        '''--cache_dir''' , type=str , default=None , required=False , help='''Directory containing pytorch model''')
    parser.add_argument('''--pytorch_model_path''' , type=str , required=True , help='''/path/to/<pytorch-model-name>.bin''')
    parser.add_argument('''--tf_cache_dir''' , type=str , required=True , help='''Directory in which to save tensorflow model''')
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path) , cache_dir=args.cache_dir , )
    convert_pytorch_checkpoint_to_tf(model=model , ckpt_dir=args.tf_cache_dir , model_name=args.model_name)
if __name__ == "__main__":
main()
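# Example invocation (hedged: the script file name is assumed; the flags are the ones
# defined in main() above):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt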
| 147 |
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = LayoutLMTokenizer
snake_case_ = LayoutLMTokenizerFast
snake_case_ = True
snake_case_ = True
def _UpperCAmelCase ( self : int ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def _UpperCAmelCase ( self : List[str] , **snake_case : List[Any] ) -> List[str]:
'''simple docstring'''
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **snake_case )
def _UpperCAmelCase ( self : Dict , snake_case : Any ) -> Optional[Any]:
'''simple docstring'''
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
return input_text, output_text
def _UpperCAmelCase ( self : Any ) -> List[Any]:
'''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(tokens , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )
def _UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
pass
| 147 | 1 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)
MODEL_BIN_FILE = '''pytorch_model.bin'''
@dataclasses.dataclass
class __lowerCAmelCase :
"""simple docstring"""
    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
    cache_dir: Optional[str] = dataclasses.field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , )
@dataclasses.dataclass
class __lowerCAmelCase :
"""simple docstring"""
A__ : str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
A__ : str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
A__ : Optional[str] = dataclasses.field(
default=UpperCAmelCase_ , metadata={"help": "A csv or a json file containing the validation data."} )
A__ : Optional[str] = dataclasses.field(
default=UpperCAmelCase_ , metadata={"help": "The name of the task to train on."} , )
A__ : Optional[List[str]] = dataclasses.field(
default=UpperCAmelCase_ , metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class __lowerCAmelCase :
"""simple docstring"""
    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy" , metadata={"help": "The evaluation metric used for the task."} )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no" , metadata={
            "help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
        } , )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0 , metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        } , )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=1_00 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
    seed: Optional[int] = dataclasses.field(
        default=None , metadata={"help": "Random seed for initialization."} , )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir) -> None:
    dataset = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold )
    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset ) )
        print(num_selected_rows )
        dataset = dataset.sort('probability' , reverse=True )
        dataset = dataset.select(range(num_selected_rows ) )
    dataset = dataset.remove_columns(['label', 'probability'] )
    dataset = dataset.rename_column('prediction' , 'label' )
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]} )
    dataset = dataset.shuffle(seed=args.seed )
    pseudo_labeled_data_file = os.path.join(next_data_dir , f'''train_pseudo.{args.data_file_extension}''' )
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file , index=False )
    else:
        dataset.to_json(pseudo_labeled_data_file )
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    model_args = STModelArguments(model_name_or_path=model_name_or_path )
    data_args = STDataArguments(train_file=train_file , infer_file=infer_file )
    training_args = STTrainingArguments(output_dir=output_dir )
    args = argparse.Namespace()
    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class ).items():
            setattr(args , key , value )
    for key, value in kwargs.items():
        if hasattr(args , key ):
            setattr(args , key , value )
# Sanity checks
A__ = {}
A__ = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
A__ = args.train_file
A__ = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
A__ = args.eval_file
for key in data_files:
A__ = data_files[key].split('.' )[-1]
assert extension in ["csv", "json"], f'''`{key}_file` should be a csv or a json file.'''
if args.data_file_extension is None:
A__ = extension
else:
assert extension == args.data_file_extension, f'''`{key}_file` should be a {args.data_file_extension} file`.'''
assert (
args.eval_metric in datasets.list_metrics()
), f'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('Creating the initial data directory for self-training...' )
A__ = f'''{args.output_dir}/self-train_iter-{{}}'''.format
A__ = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=__UpperCamelCase )
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
accelerator.wait_for_everyone()
A__ = None
A__ = None
A__ = 0
A__ = False
# Show the progress bar
A__ = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
A__ = data_dir_format(__UpperCamelCase )
assert os.path.exists(__UpperCamelCase )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
A__ = os.path.join(__UpperCamelCase , 'stage-1' )
A__ = {
'accelerator': accelerator,
'model_name_or_path': args.model_name_or_path,
'cache_dir': args.cache_dir,
'do_train': True,
'train_file': data_files['train'] if iteration == 0 else data_files['train_pseudo'],
'do_eval': True if args.eval_file is not None else False,
'eval_file': data_files['eval'],
'do_predict': True,
'infer_file': data_files['infer'],
'task_name': args.task_name,
'label_list': args.label_list,
'output_dir': current_output_dir,
'eval_metric': args.eval_metric,
'evaluation_strategy': args.evaluation_strategy,
'early_stopping_patience': args.early_stopping_patience,
'early_stopping_threshold': args.early_stopping_threshold,
'seed': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(__UpperCamelCase , __UpperCamelCase ):
arguments_dict.update({key: value} )
A__ = os.path.join(__UpperCamelCase , 'best-checkpoint' , __UpperCamelCase )
if os.path.exists(__UpperCamelCase ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.' , __UpperCamelCase , __UpperCamelCase , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 1 *****' , __UpperCamelCase )
finetune(**__UpperCamelCase )
accelerator.wait_for_everyone()
assert os.path.exists(__UpperCamelCase )
logger.info('Self-training job completed: iteration: %d, stage: 1.' , __UpperCamelCase )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
A__ = os.path.join(__UpperCamelCase , 'best-checkpoint' )
A__ = os.path.join(__UpperCamelCase , 'stage-2' )
# Update arguments_dict
A__ = model_path
A__ = data_files['train']
A__ = current_output_dir
A__ = os.path.join(__UpperCamelCase , 'best-checkpoint' , __UpperCamelCase )
if os.path.exists(__UpperCamelCase ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.' , __UpperCamelCase , __UpperCamelCase , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 2 *****' , __UpperCamelCase )
finetune(**__UpperCamelCase )
accelerator.wait_for_everyone()
assert os.path.exists(__UpperCamelCase )
logger.info('Self-training job completed: iteration: %d, stage: 2.' , __UpperCamelCase )
A__ = iteration
A__ = data_dir_format(iteration + 1 )
A__ = AutoConfig.from_pretrained(os.path.join(__UpperCamelCase , 'best-checkpoint' ) )
A__ = config.idalabel
A__ = os.path.join(__UpperCamelCase , 'eval_results_best-checkpoint.json' )
A__ = os.path.join(__UpperCamelCase , 'test_results_best-checkpoint.json' )
assert os.path.exists(__UpperCamelCase )
with open(__UpperCamelCase , 'r' ) as f:
A__ = float(json.load(__UpperCamelCase )[args.eval_metric] )
A__ = os.path.join(__UpperCamelCase , 'infer_output_best-checkpoint.csv' )
assert os.path.exists(__UpperCamelCase )
# Loading the dataset from local csv or json files.
A__ = load_dataset(args.data_file_extension , data_files={'data': data_files['infer']} )['data']
A__ = load_dataset('csv' , data_files={'data': infer_output_file} )['data']
if accelerator.is_main_process:
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
shutil.copy(__UpperCamelCase , os.path.join(__UpperCamelCase , f'''eval_results_iter-{iteration}.json''' ) )
if os.path.exists(__UpperCamelCase ):
shutil.copy(__UpperCamelCase , os.path.join(__UpperCamelCase , f'''test_results_iter-{iteration}.json''' ) )
create_pseudo_labeled_data(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
accelerator.wait_for_everyone()
A__ = os.path.join(__UpperCamelCase , f'''train_pseudo.{args.data_file_extension}''' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
A__ = eval_result
if best_iteration is None:
A__ = new_iteration
A__ = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
A__ = new_iteration
A__ = new_eval_result
A__ = 0
else:
if new_eval_result == best_eval_result:
A__ = new_iteration
A__ = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
A__ = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('Best iteration: %d' , __UpperCamelCase )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , __UpperCamelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__UpperCamelCase , f'''eval_results_iter-{iteration}.json''' ) , os.path.join(__UpperCamelCase , 'eval_results_best-iteration.json' ) , )
else:
# Assume that the last iteration is the best
logger.info('Best iteration: %d' , args.max_selftrain_iterations - 1 )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , __UpperCamelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__UpperCamelCase , f'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(__UpperCamelCase , 'eval_results_best-iteration.json' ) , )
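# Hedged usage sketch for the loop above (keyword names follow the dataclasses at the top
# of the file; file paths are placeholders):
#   selftrain(
#       model_name_or_path="bert-base-uncased",
#       train_file="train.csv",
#       infer_file="infer.csv",
#       output_dir="./self-train",
#       eval_file="eval.csv",
#       evaluation_strategy="epoch",
#       max_selftrain_iterations=3,
#   )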
| 9 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( __lowercase ):
UpperCamelCase_ : str = ["pixel_values"]
def __init__( self , a = True , a = None , a = PIL.Image.BICUBIC , a = True , a = None , a = 1 / 2_55 , a = True , a = True , a = None , a = None , **a , ) -> None:
'''simple docstring'''
super().__init__(**a )
_UpperCamelCase = size if size is not None else {"""height""": 2_56, """width""": 2_56}
_UpperCamelCase = get_size_dict(a )
_UpperCamelCase = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
_UpperCamelCase = get_size_dict(a , param_name="""crop_size""" )
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = resample
_UpperCamelCase = do_center_crop
_UpperCamelCase = crop_size
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A_ ( self , a , a , a = PIL.Image.BICUBIC , a = None , **a , ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase = get_size_dict(a )
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}' )
return resize(
a , size=(size["""height"""], size["""width"""]) , resample=a , data_format=a , **a )
def A_ ( self , a , a , a = None , **a , ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase = get_size_dict(a )
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}' )
return center_crop(a , size=(size["""height"""], size["""width"""]) , data_format=a , **a )
def A_ ( self , a , a , a = None , **a , ) -> List[str]:
'''simple docstring'''
return rescale(a , scale=a , data_format=a , **a )
def A_ ( self , a , a , a , a = None , **a , ) -> np.ndarray:
'''simple docstring'''
return normalize(a , mean=a , std=a , data_format=a , **a )
def A_ ( self , a , a = None , a = None , a=None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = ChannelDimension.FIRST , **a , ) -> PIL.Image.Image:
'''simple docstring'''
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(a )
_UpperCamelCase = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase = get_size_dict(a , param_name="""crop_size""" )
_UpperCamelCase = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(a ) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_center_crop:
_UpperCamelCase = [self.center_crop(image=a , size=a ) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=a , mean=a , std=a ) for image in images]
_UpperCamelCase = [to_channel_dimension_format(a , a ) for image in images]
_UpperCamelCase = {"""pixel_values""": images}
return BatchFeature(data=a , tensor_type=a )
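    # The preprocessing chain implemented above runs, in order:
    #   resize (BICUBIC, 256x256) -> center-crop (224x224) -> rescale (1/255) -> normalize (ImageNet mean/std)
    # Hedged usage sketch, assuming `processor` is an instance of the class and `img` a PIL image:
    #   pixel_values = processor(img, return_tensors="pt")["pixel_values"]   # shape (1, 3, 224, 224)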
| 612 | 0 |
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''keras_nlp''']
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
'''simple docstring'''
requires_backends(self , ['''keras_nlp'''] )
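# DummyObject turns public attribute access on the placeholder class into a backend check,
# so a missing optional dependency fails with a clear message. A simplified sketch of the
# pattern (the real metaclass lives in transformers.utils):
class _RequiresKerasNLPMeta(type):
    def __getattribute__(cls, key):
        if key.startswith('_'):
            return super().__getattribute__(key)
        raise ImportError(f"{cls.__name__} requires the keras_nlp library to be installed.")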
| 29 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func ) -> Union[str, Any]:
    '''simple docstring'''
    def wrapper(*args ,**kwargs ):
        starttime = timeit.default_timer()
        func(*args ,**kwargs )
        delta = timeit.default_timer() - starttime
        return delta
    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples(features ,num_examples=100 ,seq_shapes=None ):
    '''simple docstring'''
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples ):
        example = {}
        for col_id, (k, v) in enumerate(features.items() ):
            if isinstance(v ,_ArrayXD ):
                data = np.random.rand(*v.shape ).astype(v.dtype )
            elif isinstance(v ,datasets.Value ):
                if v.dtype == "string":
                    data = '''The small grey turtle was surprisingly fast when challenged.'''
                else:
                    data = np.random.randint(10 ,size=1 ).astype(v.dtype ).item()
            elif isinstance(v ,datasets.Sequence ):
                while isinstance(v ,datasets.Sequence ):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape ).astype(v.dtype )
            example[k] = data
        dummy_data.append((i, example) )
    return dummy_data
def generate_example_dataset(dataset_path ,features ,num_examples=100 ,seq_shapes=None ):
    '''simple docstring'''
    dummy_data = generate_examples(features ,num_examples=num_examples ,seq_shapes=seq_shapes )
    with ArrowWriter(features=features ,path=dataset_path ) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record )
            writer.write(example )
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            F'Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.' )
    dataset = datasets.Dataset.from_file(filename=dataset_path ,info=datasets.DatasetInfo(features=features ) )
    return dataset
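# Hedged usage sketch for the helpers above; `seq_shapes` maps each Sequence column to the
# shape of the random array generated for it:
if __name__ == "__main__":
    features = datasets.Features(
        {"text": datasets.Value("string"), "vec": datasets.Sequence(datasets.Value("float32"))}
    )
    dataset = generate_example_dataset("dummy.arrow", features, num_examples=10, seq_shapes={"vec": (128,)})
    print(dataset)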
| 29 | 1 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        """simple docstring"""
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
    @property
    def vocab_size(self):
        """simple docstring"""
        return len(self.vocab)

    def get_vocab(self):
        """simple docstring"""
        return dict(self.vocab, **self.added_tokens_encoder)
    def __getstate__(self):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        """simple docstring"""
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)
    def clean_text(self, text):
        """simple docstring"""
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))
    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        """simple docstring"""
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
continue
else:
continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
return out_string
    def convert_ids_to_string(self, ids):
        """simple docstring"""
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
return out_string
    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """simple docstring"""
        return self.reverse_vocab.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep
    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        """simple docstring"""
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
    def is_ch_char(self, char):
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
    def is_alpha(self, char):
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
    def is_punct(self, char):
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
    def is_whitespace(self, char):
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
        if len(char) == 1:
            cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
    def load_vocab(self, filepath):
        """simple docstring"""
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
return token_to_idx
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
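# Added illustration: a hedged usage sketch. The class above corresponds to
# transformers' ErnieMTokenizer; the checkpoint name is taken from the URL map
# earlier in this file, and loading it needs network access plus sentencepiece.
from transformers import ErnieMTokenizer

tokenizer = ErnieMTokenizer.from_pretrained("susnato/ernie-m-base_pytorch")
single = tokenizer("The small grey turtle was surprisingly fast.")
print(single["input_ids"])
# For a pair of sentences, the special tokens follow the
# [CLS] A [SEP] [SEP] B [SEP] layout implemented above.
pair = tokenizer("first sentence", "second sentence")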
| 377 |
def count_inversions_bf(arr) -> int:
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
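# Added illustration: a quick cross-check of the two implementations above; the
# itertools-based pair count is an addition used for verification only.
from itertools import combinations

def count_inversions_pairs(arr):
    return sum(1 for a, b in combinations(arr, 2) if a > b)

sample = [3, 1, 4, 1, 5, 9, 2, 6]
assert count_inversions_pairs(sample) == count_inversions_recursive(sample)[1]
assert count_inversions_pairs(sample) == count_inversions_bf(sample)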
| 377 | 1 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--original_config_file""",
type=str,
required=True,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--image_size""",
default=5_12,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    def parse_bool(string):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F"""could not parse string as bool {string}""" )
parser.add_argument(
"""--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool
)
parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int)
    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
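# Added illustration: an example invocation of this conversion script. The
# script filename and all paths below are placeholders, not files from the original.
#
#   python convert_original_controlnet_to_diffusers.py \
#       --checkpoint_path ./control_sd15_canny.pth \
#       --original_config_file ./cldm_v15.yaml \
#       --dump_path ./controlnet-canny \
#       --to_safetensors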
| 721 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}
class ConditionalDetrConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        '''simple docstring'''
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        '''simple docstring'''
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        '''simple docstring'''
        return self.d_model

    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        '''simple docstring'''
        return 12
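# Added illustration: a hedged round-trip sketch for the config class above;
# the overridden values are arbitrary examples.
config = ConditionalDetrConfig(num_queries=100, d_model=256)
print(config.hidden_size)  # 256: the hidden_size property aliases d_model
restored = ConditionalDetrConfig.from_dict(config.to_dict())
assert restored.num_queries == 100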
| 655 | 0 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
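# Added illustration: a minimal standalone sketch of debug_launcher, which the
# CPU tests above rely on; the tiny target function is an assumption.
from accelerate import PartialState, debug_launcher

def tiny_main():
    state = PartialState()
    print(f"process {state.process_index} of {state.num_processes}")

debug_launcher(tiny_main, num_processes=2)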
| 143 |
'''simple docstring'''
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
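# Added illustration: a worked check of the forward-mode rule above. For
# f(y) = y**6 the second derivative is 30*y**4, so at y = 9 the result
# should be 30 * 9**4 = 196830.
assert differentiate(lambda y: y**6, 9, 2) == 196830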
| 143 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
@register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)

        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10_000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10_000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
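# Added illustration: a hedged smoke test for the class above; the tiny
# hyperparameter values are arbitrary (inner_dim = num_attention_heads * attention_head_dim).
model = PriorTransformer(
    num_attention_heads=2,
    attention_head_dim=4,
    num_layers=2,
    embedding_dim=8,
    num_embeddings=7,
    additional_embeddings=4,
)
hidden = torch.randn(1, 8)      # latents being denoised
proj = torch.randn(1, 8)        # pooled conditioning embedding
tokens = torch.randn(1, 7, 8)   # per-token conditioning embeddings
out = model(hidden, timestep=1, proj_embedding=proj, encoder_hidden_states=tokens)
print(out.predicted_image_embedding.shape)  # expected: torch.Size([1, 8])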
| 465 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
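# Added illustration: a closed-form cross-check using S1 = n(n+1)/2 and
# S2 = n(n+1)(2n+1)/6; the answer is S1**2 - S2.
def solution_closed_form(n: int = 100) -> int:
    s1 = n * (n + 1) // 2
    s2 = n * (n + 1) * (2 * n + 1) // 6
    return s1 * s1 - s2

assert solution_closed_form() == solution()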
| 465 | 1 |