| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE = BlipImageProcessor()
__SCREAMING_SNAKE_CASE = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""")
__SCREAMING_SNAKE_CASE = BlipaProcessor(snake_case__ , snake_case__)
processor.save_pretrained(self.tmpdirname)
def snake_case_ ( self , **lowerCAmelCase__):
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__).tokenizer
def snake_case_ ( self , **lowerCAmelCase__):
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__).image_processor
def snake_case_ ( self):
shutil.rmtree(self.tmpdirname)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta)]
__SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1)) for x in image_inputs]
return image_inputs
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
__SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
__SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0)
__SCREAMING_SNAKE_CASE = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case__ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , snake_case__)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , snake_case__)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__)
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = image_processor(snake_case__ , return_tensors="""np""")
__SCREAMING_SNAKE_CASE = processor(images=snake_case__ , return_tensors="""np""")
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__)
__SCREAMING_SNAKE_CASE = """lower newer"""
__SCREAMING_SNAKE_CASE = processor(text=snake_case__)
__SCREAMING_SNAKE_CASE = tokenizer(snake_case__ , return_token_type_ids=snake_case__)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__)
__SCREAMING_SNAKE_CASE = """lower newer"""
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = processor(text=snake_case__ , images=snake_case__)
self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """input_ids""", """attention_mask"""])
# test if it raises when no input is passed
with pytest.raises(snake_case__):
processor()
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__)
__SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__SCREAMING_SNAKE_CASE = processor.batch_decode(snake_case__)
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(snake_case__)
self.assertListEqual(snake_case__ , snake_case__)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__)
__SCREAMING_SNAKE_CASE = """lower newer"""
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = processor(text=snake_case__ , images=snake_case__)
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """input_ids""", """attention_mask"""])
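
# --- Illustrative usage (not part of the original test file) -------------------
# A minimal sketch of the processor API the tests above exercise. It assumes the
# public Salesforce/blip2-opt-2.7b checkpoint; any BLIP-2 checkpoint would do.
if __name__ == "__main__":
    from transformers import Blip2Processor

    processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
    image = Image.new("RGB", (224, 224))
    inputs = processor(images=image, text="a photo of", return_tensors="pt")
    print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']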
| 100 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List

import git
import torch
from torch.utils.data import Dataset

from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
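
# Worked example for the SQuAD-style metrics above (illustrative, not part of
# the original module):
#   normalize_answer("The Cat!") == "cat"              # punctuation/articles stripped
#   exact_match_score("An answer.", "answer") is True
#   f1_score("the cat sat", "a cat sat down") == 0.8   # P = 2/2, R = 2/3 -> F1 = 0.8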
| 133 | 0 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloataa , )
lowerCAmelCase__ :Optional[Any] = 'A painting of a squirrel eating a burger'
lowerCAmelCase__ :int = jax.device_count()
lowerCAmelCase__ :Optional[Any] = num_samples * [prompt]
lowerCAmelCase__ :Optional[Any] = sd_pipe.prepare_inputs(__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = replicate(__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = shard(__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = jax.random.PRNGKey(0 )
lowerCAmelCase__ :Optional[Any] = jax.random.split(__UpperCAmelCase , jax.device_count() )
lowerCAmelCase__ :Dict = sd_pipe(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , num_inference_steps=2_5 , jit=__UpperCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3)
lowerCAmelCase__ :Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCAmelCase__ :Any = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowerCAmelCase__ :Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCAmelCase__ :str = jnp.array([0.42_38, 0.44_14, 0.43_95, 0.44_53, 0.46_29, 0.45_90, 0.45_31, 0.4_55_08, 0.45_12] )
print(F"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Any = 'stabilityai/stable-diffusion-2'
lowerCAmelCase__ :Tuple = FlaxDPMSolverMultistepScheduler.from_pretrained(__UpperCAmelCase , subfolder='scheduler' )
lowerCAmelCase__ :List[str] = FlaxStableDiffusionPipeline.from_pretrained(
__UpperCAmelCase , scheduler=__UpperCAmelCase , revision='bf16' , dtype=jnp.bfloataa , )
lowerCAmelCase__ :Optional[int] = scheduler_params
lowerCAmelCase__ :Any = 'A painting of a squirrel eating a burger'
lowerCAmelCase__ :str = jax.device_count()
lowerCAmelCase__ :List[str] = num_samples * [prompt]
lowerCAmelCase__ :Union[str, Any] = sd_pipe.prepare_inputs(__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = replicate(__UpperCAmelCase )
lowerCAmelCase__ :Tuple = shard(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = jax.random.PRNGKey(0 )
lowerCAmelCase__ :str = jax.random.split(__UpperCAmelCase , jax.device_count() )
lowerCAmelCase__ :Any = sd_pipe(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , num_inference_steps=2_5 , jit=__UpperCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3)
lowerCAmelCase__ :int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCAmelCase__ :Dict = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowerCAmelCase__ :Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCAmelCase__ :Union[str, Any] = jnp.array([0.43_36, 0.4_29_69, 0.44_53, 0.41_99, 0.42_97, 0.45_31, 0.44_34, 0.44_34, 0.42_97] )
print(F"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
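
# --- Illustrative sketch (not part of the original test file) ------------------
# The replicate/shard calls above implement Flax's standard data-parallel setup:
# parameters are copied to every device and the batch gains a leading device axis.
if __name__ == "__main__":
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard

    params = {"w": jnp.ones((4,))}
    batch = jnp.arange(2 * jax.device_count()).reshape(-1, 1)

    print(replicate(params)["w"].shape)  # (n_devices, 4)
    print(shard(batch).shape)            # (n_devices, 2, 1)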
| 371 |
"""simple docstring"""
def __A (_SCREAMING_SNAKE_CASE ) ->int:
"""simple docstring"""
lowerCAmelCase__ :list[list[int]] = [[0 for _ in range(_SCREAMING_SNAKE_CASE )] for _ in range(m + 1 )]
for i in range(m + 1 ):
lowerCAmelCase__ :str = 1
for n in range(m + 1 ):
for k in range(1 , _SCREAMING_SNAKE_CASE ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
__A = int(input("""Enter a number: """).strip())
print(partition(n))
except ValueError:
print("""Please enter a number.""")
else:
try:
__A = int(sys.argv[1])
print(partition(n))
except ValueError:
print("""Please pass a number.""")
| 254 | 0 |
"""simple docstring"""
import random
from typing import Any
def UpperCamelCase__ ( lowercase__ : list ):
for _ in range(len(lowercase__ ) ):
snake_case : List[Any] = random.randint(0 , len(lowercase__ ) - 1 )
snake_case : List[str] = random.randint(0 , len(lowercase__ ) - 1 )
snake_case , snake_case : List[str] = data[b], data[a]
return data
if __name__ == "__main__":
__A = [0, 1, 2, 3, 4, 5, 6, 7]
__A = ["python", "says", "hello", "!"]
print("Fisher-Yates Shuffle:")
print("List", integers, strings)
print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 148 |
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
__A = "base_with_context"
def load_notes_encoder(weights, model):
snake_case : Dict = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
snake_case : Tuple = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=lowercase__ )
for lyr_num, lyr in enumerate(model.encoders ):
ly_weight = weights[f"layers_{lyr_num}"]
snake_case : List[Any] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
attention_weights = ly_weight["attention"]
snake_case : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
snake_case : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
snake_case : Any = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
snake_case : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
snake_case : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
snake_case : Dict = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
snake_case : str = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
snake_case : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
snake_case : List[Any] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_continuous_encoder(weights, model):
snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
snake_case : Union[str, Any] = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=lowercase__ )
for lyr_num, lyr in enumerate(model.encoders ):
ly_weight = weights[f"layers_{lyr_num}"]
attention_weights = ly_weight["attention"]
snake_case : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
snake_case : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
snake_case : Dict = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
snake_case : int = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
snake_case : List[Any] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
snake_case : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
snake_case : str = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
snake_case : int = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_decoder(weights, model):
snake_case : int = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
snake_case : List[Any] = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
snake_case : Tuple = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=lowercase__ )
snake_case : Tuple = nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
ly_weight = weights[f"layers_{lyr_num}"]
snake_case : List[str] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
snake_case : Any = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
attention_weights = ly_weight["self_attention"]
snake_case : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
snake_case : Any = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
snake_case : str = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
snake_case : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
snake_case : Dict = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
snake_case : str = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
snake_case : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
snake_case : Union[str, Any] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
snake_case : Dict = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
snake_case : Tuple = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
snake_case : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
snake_case : Any = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
snake_case : str = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
snake_case : List[str] = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
snake_case : List[Any] = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jax.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder,
        continuous_encoder=continuous_encoder,
        decoder=decoder,
        scheduler=scheduler,
        melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument(
"--checkpoint_path",
default=f'{MODEL}/checkpoint_500000',
type=str,
required=False,
help="Path to the original jax model checkpoint.",
)
__A = parser.parse_args()
main(args)
| 148 | 1 |
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
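
# Quick sanity check for entropy() (illustrative, not part of the original
# script): a uniform distribution over four outcomes has entropy ln(4):
#
#   >>> entropy(torch.tensor([0.25, 0.25, 0.25, 0.25]))
#   tensor(1.3863)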
def print_2d_tensor(tensor):
    """Print a 2D tensor, one logged row per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores as in
    Michel et al. (https://arxiv.org/abs/1905.10650).
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Mask heads (set them to zero) from least to most important until the
    score drops below args.masking_threshold of the original score.
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (actually remove the masked weights) and measure the effect
    on parameter count, score and timing.
    """
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)

    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
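
# Example invocation (hypothetical file names; the script expects a whitespace-
# separated token-id matrix readable by np.loadtxt):
#
#   python prune_gpt2_heads.py --model_name_or_path gpt2 \
#       --data_dir ./token_ids.txt --output_dir ./pruned \
#       --try_masking --masking_threshold 0.9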
| 75 |
import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Export a pytorch BertModel as a TF 1.x checkpoint, renaming and
    transposing tensors to match the original TF variable layout.
    """
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
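
# Example invocation (hypothetical script/file names):
#
#   python convert_bert_pytorch_checkpoint_to_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt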
| 75 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
    DiffusionPipeline,
    UnCLIPImageVariationPipeline,
    UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS

    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention_forwardGenerator_pass = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            num_hidden_layers=5,
            num_attention_heads=4,
            image_size=32,
            intermediate_size=37,
            patch_size=1,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_text_proj(self):
        torch.manual_seed(0)

        model_kwargs = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }

        model = UnCLIPTextProjModel(**model_kwargs)
        return model

    @property
    def dummy_decoder(self):
        torch.manual_seed(0)

        model_kwargs = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_super_res_kwargs(self):
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def dummy_super_res_first(self):
        torch.manual_seed(0)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    @property
    def dummy_super_res_last(self):
        # seeded differently to get different unet than `self.dummy_super_res_first`
        torch.manual_seed(1)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model
    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last

        decoder_scheduler = UnCLIPScheduler(
            variance_type="learned_range",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        super_res_scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        image_encoder = self.dummy_image_encoder

        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }
    def test_unclip_image_variation_input_tensor(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.0002,
                0.9997,
                0.9997,
                0.9969,
                0.0023,
                0.9997,
                0.9969,
                0.9970,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_image(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)

        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_list_images(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        pipeline_inputs["image"] = [
            pipeline_inputs["image"],
            pipeline_inputs["image"],
        ]

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        tuple_pipeline_inputs["image"] = [
            tuple_pipeline_inputs["image"],
            tuple_pipeline_inputs["image"],
        ]

        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (2, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.9989,
                0.0008,
                0.0021,
                0.9960,
                0.0018,
                0.0014,
                0.0002,
                0.9933,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_passed_image_embed(self):
        device = torch.device("cpu")

        class DummyScheduler:
            init_noise_sigma = 1

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(0)
        dtype = pipe.decoder.dtype
        batch_size = 1

        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        img_out_1 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents
        ).images

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop("image")
        image_embeddings = pipe.image_encoder(image).image_embeds

        img_out_2 = pipe(
            **pipeline_inputs,
            decoder_latents=decoder_latents,
            super_res_latents=super_res_latents,
            image_embeddings=image_embeddings,
        ).images

        # make sure passing text embeddings manually is identical
        assert np.abs(img_out_1 - img_out_2).max() < 1e-4
    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        expected_max_diff = 1e-2

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, expected_max_diff=expected_max_diff
        )

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
        )

    def test_inference_batch_consistent(self):
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes,
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
            )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs
            )

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy"
        )

        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(
            input_image,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        assert_mean_pixel_difference(image, expected_image, 15)
| 124 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Dict = ["image_processor", "tokenizer"]
lowerCamelCase__ : Dict = "BlipImageProcessor"
lowerCamelCase__ : Union[str, Any] = "AutoTokenizer"
    def __init__(self, image_processor, tokenizer, qformer_tokenizer) -> None:
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError('You have to specify at least images or text.')
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding['qformer_input_ids'] = qformer_text_encoding.pop('input_ids')
            encoding['qformer_attention_mask'] = qformer_text_encoding.pop('attention_mask')
        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, 'qformer_tokenizer')
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder='qformer_tokenizer')
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
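# The save/load pair above works because the extra tokenizer is serialized
# into its own subfolder. A minimal sketch of that round-trip; the directory
# and checkpoint names here are illustrative, not from the source.
import os
from transformers import AutoTokenizer

save_directory = "./my_processor"  # hypothetical output directory
secondary = AutoTokenizer.from_pretrained("bert-base-uncased")
secondary.save_pretrained(os.path.join(save_directory, "qformer_tokenizer"))
reloaded = AutoTokenizer.from_pretrained(save_directory, subfolder="qformer_tokenizer")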
| 77 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE__ (__snake_case ):
__lowerCamelCase : Union[str, Any] = """dpr"""
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", projection_dim: int = 0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
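# A stripped-down illustration of the same config pattern: shared kwargs are
# forwarded to PretrainedConfig, model-specific fields become attributes.
# The class and field names below are invented for the example.
from transformers import PretrainedConfig

class TinyEncoderConfig(PretrainedConfig):  # hypothetical config class
    model_type = "tiny-encoder"

    def __init__(self, vocab_size=30522, hidden_size=768, pad_token_id=0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size

tiny_config = TinyEncoderConfig(hidden_size=256)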
| 216 |
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(fnc: Callable[[int | float], int | float], x_start: int | float, x_end: int | float, steps: int = 100) -> float:
    '''simple docstring'''
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x: float) -> float:
        '''simple docstring'''
        return x**3 + x**2

    print('''f(x) = x^3 + x^2''')
    print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
    i = 10
    while i <= 100_000:
        print(f'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
        i *= 10
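# Quick sanity check on a nonnegative integrand, where the result equals the
# ordinary definite integral: the integral of x^2 over [0, 1] is exactly 1/3.
assert abs(trapezoidal_area(lambda x: x * x, 0, 1, 10_000) - 1 / 3) < 1e-6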
| 216 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a__ : Optional[int] = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[str] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Tuple = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
a__ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
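# The `_LazyModule` arrangement above defers heavy imports until an attribute
# is first accessed. A self-contained sketch of the same idea for a package
# `__init__.py`, using only the standard library (PEP 562 module __getattr__);
# the submodule map below is illustrative, not from the source.
import importlib

_lazy_structure = {"json_tools": ["dumps"]}  # hypothetical submodule -> symbols

def __getattr__(name):
    for module_name, symbols in _lazy_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")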
| 349 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
a__ : str = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
a__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 349 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--onnx_model_path",
default=None,
type=str,
required=True,
help="Path to ONNX model: ",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
required=True,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=3_8_4,
type=int,
help=(
"The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded."
),
)
parser.add_argument(
"--doc_stride",
default=1_2_8,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
"--n_best_size",
default=2_0,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=3_0,
type=int,
help=(
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
),
)
parser.add_argument("--seed", type=int, default=4_2, help="random seed for initialization")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
required=True,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
"--int8",
action="store_true",
help="Whether to use INT8",
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
logger.info("Training/evaluation parameters %s", args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists("temp_engine"):
os.makedirs("temp_engine")
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, "rb") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
    config.max_workspace_size = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fp16:
    config.set_flag(trt.BuilderFlag.FP16)
if args.int8:
    config.set_flag(trt.BuilderFlag.INT8)
profile = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, "wb") as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    """simple docstring"""
    input_ids = np.asarray(inputs['input_ids'], dtype=np.int32)
    attention_mask = np.asarray(inputs['attention_mask'], dtype=np.int32)
    token_type_ids = np.asarray(inputs['token_type_ids'], dtype=np.int32)
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle)
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
lowerCamelCase : Any = raw_datasets["validation"].column_names
lowerCamelCase : Tuple = "question" if "question" in column_names else column_names[0]
lowerCamelCase : Any = "context" if "context" in column_names else column_names[1]
lowerCamelCase : List[str] = "answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
lowerCamelCase : Optional[int] = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    """simple docstring"""
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation='only_second' if pad_on_right else 'only_first', max_length=max_seq_length, stride=args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding='max_length', )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop('overflow_to_sample_mapping')
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples['example_id'] = []
    for i in range(len(tokenized_examples['input_ids'])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples['example_id'].append(examples['id'][sample_index])
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples['offset_mapping'][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples['offset_mapping'][i])
        ]
    return tokenized_examples
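# The overflow bookkeeping above hinges on `return_overflowing_tokens`. A small
# demonstration of the mapping it produces, assuming a fast tokenizer and
# network access for the (illustrative) bert-base-uncased checkpoint.
from transformers import AutoTokenizer as _DemoTokenizer

demo_tok = _DemoTokenizer.from_pretrained("bert-base-uncased", use_fast=True)
demo_enc = demo_tok(
    ["short question"],
    ["some context " * 200],  # long enough to overflow max_length
    truncation="only_second",
    max_length=64,
    stride=16,
    return_overflowing_tokens=True,
    return_offsets_mapping=True,
    padding="max_length",
)
print(demo_enc["overflow_to_sample_mapping"])  # e.g. [0, 0, 0, ...]: every chunk maps back to example 0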
lowerCamelCase : Union[str, Any] = raw_datasets["validation"]
# Validation Feature Creation
lowerCamelCase : Any = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
lowerCamelCase : List[Any] = default_data_collator
lowerCamelCase : Optional[Any] = eval_dataset.remove_columns(["example_id", "offset_mapping"])
lowerCamelCase : Optional[Any] = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    """simple docstring"""
    predictions = postprocess_qa_predictions(
        examples=examples, features=features, predictions=predictions, version_2_with_negative=args.version_2_with_negative, n_best_size=args.n_best_size, max_answer_length=args.max_answer_length, null_score_diff_threshold=args.null_score_diff_threshold, output_dir=args.output_dir, prefix=stage, )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
    references = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
lowerCamelCase : Dict = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def binding_nbytes(binding):
    """simple docstring"""
    return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
# Allocate device memory for inputs and outputs.
d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
d_output0 = cuda.mem_alloc(h_output0.nbytes)
d_output1 = cuda.mem_alloc(h_output1.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
stream = cuda.Stream()
# Evaluation
logger.info("***** Running Evaluation *****")
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
total_time = 0.0
niter = 0
start_time = timeit.default_timer()
all_preds = None
for step, batch in enumerate(eval_dataloader):
    outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
    total_time += infer_time
    niter += 1
    start_logits, end_logits = outputs
    start_logits = torch.tensor(start_logits)
    end_logits = torch.tensor(end_logits)
    # necessary to pad predictions and labels for being gathered
    start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
    end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
    logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
    all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
    all_preds = nested_truncate(all_preds, len(eval_dataset))
evalTime = timeit.default_timer() - start_time
logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1_0_0_0 / niter))
logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1_0_0_0))
logger.info("Total Number of Inference = %d", niter)
prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''')
| 356 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
mock = '__test_patch_submodule_mock__'
with patch_submodule(_test_patching, 'os.path.join', mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    """simple docstring"""
    assert _test_patching.open is open
    mock = '__test_patch_submodule_builtin_mock__'
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, 'open', mock):
        assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def test_patch_submodule_missing():
"""simple docstring"""
mock = '__test_patch_submodule_missing_mock__'
with patch_submodule(_test_patching, 'pandas.read_csv', mock):
pass
def test_patch_submodule_missing_builtin():
"""simple docstring"""
mock = '__test_patch_submodule_missing_builtin_mock__'
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching, 'len', None) is None
with patch_submodule(_test_patching, 'len', mock):
    assert _test_patching.len is mock
assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
"""simple docstring"""
mock = '__test_patch_submodule_start_and_stop_mock__'
patch = patch_submodule(_test_patching, 'open', mock)
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive():
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
mock_join = '__test_patch_submodule_successive_join__'
mock_dirname = '__test_patch_submodule_successive_dirname__'
mock_rename = '__test_patch_submodule_successive_rename__'
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching, 'os.path.join', mock_join):
    with patch_submodule(_test_patching, 'os.rename', mock_rename):
        with patch_submodule(_test_patching, 'os.path.dirname', mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching, 'os.rename', mock_rename):
    with patch_submodule(_test_patching, 'os.path.join', mock_join):
        with patch_submodule(_test_patching, 'os.path.dirname', mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
"""simple docstring"""
mock = '__test_patch_submodule_doesnt_exist_mock__'
with patch_submodule(_test_patching, '__module_that_doesn_exist__.__attribute_that_doesn_exist__', mock):
    pass
with patch_submodule(_test_patching, 'os.__attribute_that_doesn_exist__', mock):
pass
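# For readers without the datasets test helpers: the start()/stop() and
# context-manager behavior exercised above mirrors unittest.mock. A generic
# sketch of the same patching discipline using only the standard library.
import os.path
from unittest import mock as _mock

patcher = _mock.patch.object(os.path, "join", "__mock_join__")
patcher.start()
assert os.path.join == "__mock_join__"
patcher.stop()
assert callable(os.path.join)  # the original attribute is restored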
| 114 | 0 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("""1""" + """0""" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(F"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
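# Worked example: 49/98 is the classic digit-cancelling fraction, since
# removing the shared digit 9 leaves 4/8, which equals 49/98. Project Euler 33
# asks for the denominator of the product of all four such fractions: 100.
assert is_digit_cancelling(49, 98)
assert fraction_list(2) == ["16/64", "19/95", "26/65", "49/98"]
assert solution() == 100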
if __name__ == "__main__":
print(solution())
| 233 |
def selection_sort(collection: list) -> list:
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(selection_sort(unsorted))
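# Quick checks for the selection sort above.
assert selection_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
assert selection_sort([]) == []
assert selection_sort([-2, -5, -45]) == [-45, -5, -2]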
| 338 | 0 |
"""simple docstring"""
import re
def split_input(str_: str) -> list:
    '''simple docstring'''
    return [char.split() for char in re.split(R"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    '''simple docstring'''
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    '''simple docstring'''
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ])
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ])
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    '''simple docstring'''
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    '''simple docstring'''
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    '''simple docstring'''
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    '''simple docstring'''
    return to_complex_case(text, upper, "-")


if __name__ == "__main__":
    __import__("""doctest""").testmod()
| 226 |
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode:
    """simple docstring"""
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None
def build_tree() -> TreeNode:
    '''simple docstring'''
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = F'''Enter the left node of {node_found.data}: '''
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = F'''Enter the right node of {node_found.data}: '''
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise
def pre_order(node: TreeNode) -> None:
    '''simple docstring'''
    if not isinstance(node, TreeNode) or not node:
return
print(node.data , end="," )
pre_order(node.left )
pre_order(node.right )
def in_order(node: TreeNode) -> None:
    '''simple docstring'''
    if not isinstance(node, TreeNode) or not node:
return
in_order(node.left )
print(node.data , end="," )
in_order(node.right )
def post_order(node: TreeNode) -> None:
    '''simple docstring'''
    if not isinstance(node, TreeNode) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end="," )
def level_order(node: TreeNode) -> None:
    '''simple docstring'''
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
print(node_dequeued.data , end="," )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def level_order_actual(node: TreeNode) -> None:
    '''simple docstring'''
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    '''simple docstring'''
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter(node: TreeNode) -> None:
    '''simple docstring'''
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right
def post_order_iter(node: TreeNode) -> None:
    '''simple docstring'''
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")
def lowercase ( A_ = "" , A_=50 , A_="*" )-> str:
'''simple docstring'''
if not s:
return "\n" + width * char
a , a : Dict = divmod(width - len(A_ ) - 2 , 2 )
return F'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
node = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 50 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
| 226 | 1 |
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[Any] , __UpperCAmelCase : str = "cpu" , __UpperCAmelCase : str = "openai/clip-vit-large-patch14" ):
'''simple docstring'''
_A = device
_A = CLIPTokenizerFast.from_pretrained(__UpperCAmelCase )
_A = [0.48145466, 0.4578275, 0.40821073]
_A = [0.26862954, 0.26130258, 0.27577711]
_A = torchvision.transforms.Normalize(self.image_mean , self.image_std )
_A = torchvision.transforms.Resize(224 )
_A = torchvision.transforms.CenterCrop(224 )
def lowerCAmelCase ( self : Any , __UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
_A = self.resize(__UpperCAmelCase )
_A = self.center_crop(__UpperCAmelCase )
_A = self.normalize(__UpperCAmelCase )
return images
def __call__( self : Any , __UpperCAmelCase : int=None , __UpperCAmelCase : Union[str, Any]=None , **__UpperCAmelCase : int ):
'''simple docstring'''
_A = self.tokenizer(text=__UpperCAmelCase , **__UpperCAmelCase )
_A = self.preprocess_img(__UpperCAmelCase )
_A = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class _UpperCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] , __UpperCAmelCase : Optional[Any]=10 , __UpperCAmelCase : List[str]=0.01 , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : str=None , __UpperCAmelCase : str=False , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : List[Any]="image" , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : Dict=False , __UpperCAmelCase : Dict=False , __UpperCAmelCase : Union[str, Any]=False , ):
'''simple docstring'''
super().__init__()
_A = None
_A = device if device else get_device()
if vqgan:
_A = vqgan
else:
_A = load_vqgan(self.device , conf_path=__UpperCAmelCase , ckpt_path=__UpperCAmelCase )
self.vqgan.eval()
if clip:
_A = clip
else:
_A = CLIPModel.from_pretrained("openai/clip-vit-base-patch32" )
self.clip.to(self.device )
_A = ProcessorGradientFlow(device=self.device )
_A = iterations
_A = lr
_A = log
_A = make_grid
_A = return_val
_A = quantize
_A = self.vqgan.decoder.z_shape
def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : int=5 , __UpperCAmelCase : Union[str, Any]=True ):
'''simple docstring'''
_A = []
if output_path is None:
_A = "./animation.gif"
if input_path is None:
_A = self.save_path
_A = sorted(glob(input_path + "/*" ) )
if not len(__UpperCAmelCase ):
raise ValueError(
"No images found in save path, aborting (did you pass save_intermediate=True to the generate"
" function?)" )
if len(__UpperCAmelCase ) == 1:
print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)" )
_A = total_duration / len(__UpperCAmelCase )
_A = [frame_duration] * len(__UpperCAmelCase )
if extend_frames:
_A = 1.5
_A = 3
for file_name in paths:
if file_name.endswith(".png" ):
images.append(imageio.imread(__UpperCAmelCase ) )
imageio.mimsave(__UpperCAmelCase , __UpperCAmelCase , duration=__UpperCAmelCase )
print(f'''gif saved to {output_path}''' )
def lowerCAmelCase ( self : int , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : List[str]=None ):
'''simple docstring'''
if not (path or img):
raise ValueError("Input either path or tensor" )
if img is not None:
raise NotImplementedError
_A = preprocess(Image.open(__UpperCAmelCase ) , target_image_size=256 ).to(self.device )
_A = preprocess_vqgan(__UpperCAmelCase )
_A , *_A = self.vqgan.encode(__UpperCAmelCase )
return z
def lowerCAmelCase ( self : str , __UpperCAmelCase : int ):
'''simple docstring'''
_A = self.latent.detach().requires_grad_()
_A = base_latent + transform_vector
if self.quantize:
_A , *_A = self.vqgan.quantize(__UpperCAmelCase )
else:
_A = trans_latent
return self.vqgan.decode(__UpperCAmelCase )
def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any]=None ):
'''simple docstring'''
_A = self.clip_preprocessor(text=__UpperCAmelCase , images=__UpperCAmelCase , return_tensors="pt" , padding=__UpperCAmelCase )
_A = self.clip(**__UpperCAmelCase )
_A = clip_outputs.logits_per_image
if weights is not None:
_A = similarity_logits * weights
return similarity_logits.sum()
def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Any ):
'''simple docstring'''
_A = self._get_clip_similarity(pos_prompts["prompts"] , __UpperCAmelCase , weights=(1 / pos_prompts["weights"]) )
if neg_prompts:
_A = self._get_clip_similarity(neg_prompts["prompts"] , __UpperCAmelCase , weights=neg_prompts["weights"] )
else:
_A = torch.tensor([1] , device=self.device )
_A = -torch.log(__UpperCAmelCase ) + torch.log(__UpperCAmelCase )
return loss
def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : int ):
'''simple docstring'''
_A = torch.randn_like(self.latent , requires_grad=__UpperCAmelCase , device=self.device )
_A = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_A = self._add_vector(__UpperCAmelCase )
_A = loop_post_process(__UpperCAmelCase )
_A = self._get_CLIP_loss(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
print("CLIP loss" , __UpperCAmelCase )
if self.log:
wandb.log({"CLIP Loss": clip_loss} )
clip_loss.backward(retain_graph=__UpperCAmelCase )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Dict , __UpperCAmelCase : Tuple ):
'''simple docstring'''
wandb.init(reinit=__UpperCAmelCase , project="face-editor" )
wandb.config.update({"Positive Prompts": positive_prompts} )
wandb.config.update({"Negative Prompts": negative_prompts} )
wandb.config.update({"lr": self.lr, "iterations": self.iterations} )
if image_path:
_A = Image.open(__UpperCAmelCase )
_A = image.resize((256, 256) )
wandb.log("Original Image" , wandb.Image(__UpperCAmelCase ) )
def lowerCAmelCase ( self : Any , __UpperCAmelCase : Optional[int] ):
'''simple docstring'''
if not prompts:
return []
_A = []
_A = []
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_A = [prompt.strip() for prompt in prompts.split("|" )]
for prompt in prompts:
if isinstance(__UpperCAmelCase , (tuple, list) ):
_A = prompt[0]
_A = float(prompt[1] )
elif ":" in prompt:
_A , _A = prompt.split(":" )
_A = float(__UpperCAmelCase )
else:
_A = prompt
_A = 1.0
processed_prompts.append(__UpperCAmelCase )
weights.append(__UpperCAmelCase )
return {
"prompts": processed_prompts,
"weights": torch.tensor(__UpperCAmelCase , device=self.device ),
}
def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : Dict=True , __UpperCAmelCase : Dict=True , __UpperCAmelCase : Tuple=None , ):
'''simple docstring'''
if image_path:
_A = self._get_latent(__UpperCAmelCase )
else:
_A = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
assert pos_prompts, "You must provide at least one positive prompt."
_A = self.process_prompts(__UpperCAmelCase )
_A = self.process_prompts(__UpperCAmelCase )
if save_final and save_path is None:
_A = os.path.join("./outputs/" , "_".join(pos_prompts["prompts"] ) )
if not os.path.exists(__UpperCAmelCase ):
os.makedirs(__UpperCAmelCase )
else:
_A = save_path + "_" + get_timestamp()
os.makedirs(__UpperCAmelCase )
_A = save_path
_A = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("Original Image" )
show_pil(custom_to_pil(__UpperCAmelCase ) )
_A = loop_post_process(__UpperCAmelCase )
for iter, transformed_img in enumerate(self._optimize_CLIP(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) ):
if show_intermediate:
show_pil(__UpperCAmelCase )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}.png''' ) )
if self.log:
wandb.log({"Image": wandb.Image(__UpperCAmelCase )} )
if show_final:
show_pil(__UpperCAmelCase )
if save_final:
transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}_final.png''' ) )
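# The generate loop above boils down to gradient descent on a CLIP-style
# similarity loss through a frozen decoder. A toy sketch with stand-in
# modules so it runs without any checkpoints; nothing below is the real
# VQGAN or CLIP, only the optimization pattern.
import torch

toy_latent = torch.randn(1, 16)            # frozen starting latent
toy_decode = torch.nn.Linear(16, 32)       # stand-in differentiable decoder
toy_target = torch.randn(1, 32)            # stand-in "prompt" embedding
edit_vector = torch.zeros_like(toy_latent, requires_grad=True)
opt = torch.optim.Adam([edit_vector], lr=0.1)
for _ in range(50):
    opt.zero_grad()
    decoded = toy_decode(toy_latent + edit_vector)
    loss = -torch.cosine_similarity(decoded, toy_target).mean()
    loss.backward()
    opt.step()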
| 79 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple=13 , __UpperCAmelCase : Optional[int]=7 , __UpperCAmelCase : int=True , __UpperCAmelCase : str=True , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : str=True , __UpperCAmelCase : List[str]=99 , __UpperCAmelCase : List[str]=32 , __UpperCAmelCase : Union[str, Any]=2 , __UpperCAmelCase : List[str]=4 , __UpperCAmelCase : Optional[Any]=37 , __UpperCAmelCase : Any="gelu" , __UpperCAmelCase : Optional[Any]=0.1 , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : Dict=512 , __UpperCAmelCase : List[Any]=16 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : Optional[Any]=0.02 , __UpperCAmelCase : int=3 , __UpperCAmelCase : Dict=4 , __UpperCAmelCase : str=None , ):
'''simple docstring'''
_A = parent
_A = 13
_A = 7
_A = True
_A = True
_A = True
_A = True
_A = 99
_A = 32
_A = 2
_A = 4
_A = 37
_A = "gelu"
_A = 0.1
_A = 0.1
_A = 512
_A = 16
_A = 2
_A = 0.02
_A = 3
_A = 4
_A = None
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = random_attention_mask([self.batch_size, self.seq_length] )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = ids_tensor([self.batch_size] , self.num_choices )
_A = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Dict , __UpperCAmelCase : Any , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
_A = TFRoFormerModel(config=__UpperCAmelCase )
_A = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_A = [input_ids, input_mask]
_A = model(__UpperCAmelCase )
_A = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : List[Any] ):
'''simple docstring'''
_A = True
_A = TFRoFormerForCausalLM(config=__UpperCAmelCase )
_A = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_A = model(__UpperCAmelCase )["logits"]
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str ):
'''simple docstring'''
_A = TFRoFormerForMaskedLM(config=__UpperCAmelCase )
_A = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_A = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
_A = self.num_labels
_A = TFRoFormerForSequenceClassification(config=__UpperCAmelCase )
_A = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_A = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] ):
'''simple docstring'''
_A = self.num_choices
_A = TFRoFormerForMultipleChoice(config=__UpperCAmelCase )
_A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
_A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
_A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
_A = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_A = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : Dict , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] ):
'''simple docstring'''
_A = self.num_labels
_A = TFRoFormerForTokenClassification(config=__UpperCAmelCase )
_A = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_A = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : int , __UpperCAmelCase : int ):
'''simple docstring'''
_A = TFRoFormerForQuestionAnswering(config=__UpperCAmelCase )
_A = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_A = model(__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_A = self.prepare_config_and_inputs()
(
(
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) ,
) = config_and_inputs
_A = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
snake_case = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
snake_case = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case = False
snake_case = False
def lowerCAmelCase ( self : int , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int] ):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        # roformer_chinese_base was trained with a vocabulary of 50000 tokens.
        vocab_size = 50000
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4
    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )
        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)
    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb2 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb2([2, 16, 512])
        weights = emb2.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
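
# A minimal sketch (not part of the original test file) of the table these tests
# check: channel i of position p holds sin(p / 10000**(2i/dim)) in the first half
# of the embedding and the matching cosine in the second half.
#
#   import numpy as np
#
#   def sinusoidal_table(num_positions, dim):
#       pos = np.arange(num_positions)[:, None]
#       inv_freq = 1.0 / np.power(10000.0, 2 * np.arange(dim // 2) / dim)
#       return np.concatenate([np.sin(pos * inv_freq), np.cos(pos * inv_freq)], axis=-1)
#
# sinusoidal_table(2, 6) reproduces the `desired_weights` tensor in test_basic above.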
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4
    def test_apply_rotary_position_embeddings(self):
        # 2 batches, 12 heads, 16 positions, 64 channels per head
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]
        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )
        expected_query = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
        expected_key = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key, atol=self.tolerance)
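
# For reference (a paraphrase of the rotary trick, not original file content): the
# call above mixes each (even, odd) channel pair by a position-dependent angle,
#   q'[2i]   = q[2i]   * cos(theta_p_i) - q[2i+1] * sin(theta_p_i)
#   q'[2i+1] = q[2i+1] * cos(theta_p_i) + q[2i]   * sin(theta_p_i)
# so query/key dot products in attention depend only on relative positions.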
| 79 | 1 |
"""simple docstring"""
import argparse
from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    tax_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
_lowerCAmelCase : Tuple = """SelfAttention"""
if config.model_type == "longt5" and config.encoder_attention_type == "local":
_lowerCAmelCase : Optional[Any] = """LocalSelfAttention"""
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCAmelCase : Union[str, Any] = """TransientGlobalSelfAttention"""
else:
raise ValueError(
"""Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"""
""" attribute with a value from ['local', 'transient-global].""" )
# Encoder
for layer_index in range(config.num_layers ):
        layer_name = f"layers_{str(layer_index)}"
# Self-Attention
_lowerCAmelCase : List[str] = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""key"""]["""kernel"""]
_lowerCAmelCase : str = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""out"""]["""kernel"""]
_lowerCAmelCase : str = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""query"""]["""kernel"""]
_lowerCAmelCase : Any = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""value"""]["""kernel"""]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCAmelCase : Dict = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""T5LayerNorm_0"""]["""scale"""]
# Layer Normalization
_lowerCAmelCase : Any = tax_model["""target"""]["""encoder"""][layer_name]["""pre_attention_layer_norm"""]["""scale"""]
if split_mlp_wi:
_lowerCAmelCase : List[str] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
_lowerCAmelCase : int = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
_lowerCAmelCase : List[str] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
_lowerCAmelCase : Tuple = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
_lowerCAmelCase : Tuple = tax_model["""target"""]["""encoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
_lowerCAmelCase : Any = flax_model.params["""encoder"""]["""block"""][str(_lowerCamelCase )]["""layer"""]
_lowerCAmelCase : Any = tax_attention_key
_lowerCAmelCase : str = tax_attention_out
_lowerCAmelCase : Union[str, Any] = tax_attention_query
_lowerCAmelCase : Optional[Any] = tax_attention_value
_lowerCAmelCase : List[str] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCAmelCase : Any = tax_global_layer_norm
if split_mlp_wi:
_lowerCAmelCase : Dict = tax_mlp_wi_a
_lowerCAmelCase : List[Any] = tax_mlp_wi_a
else:
_lowerCAmelCase : List[str] = tax_mlp_wi
_lowerCAmelCase : str = tax_mlp_wo
_lowerCAmelCase : Optional[Any] = tax_mlp_layer_norm
_lowerCAmelCase : Any = flax_model_encoder_layer_block
# Only for layer 0:
_lowerCAmelCase : Union[str, Any] = tax_model["""target"""]["""encoder"""]["""relpos_bias"""]["""rel_embedding"""].T
_lowerCAmelCase : Optional[Any] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCAmelCase : List[str] = tax_model["""target"""]["""encoder"""]["""side_relpos_bias"""]["""rel_embedding"""].T
_lowerCAmelCase : Optional[int] = tax_encoder_global_rel_embedding
# Assigning
_lowerCAmelCase : Any = tax_model["""target"""]["""encoder"""]["""encoder_norm"""]["""scale"""]
_lowerCAmelCase : Any = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
        layer_name = f"layers_{str(layer_index)}"
# Self-Attention
_lowerCAmelCase : List[str] = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""key"""]["""kernel"""]
_lowerCAmelCase : int = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""out"""]["""kernel"""]
_lowerCAmelCase : List[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""query"""]["""kernel"""]
_lowerCAmelCase : str = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""value"""]["""kernel"""]
# Layer Normalization
_lowerCAmelCase : Optional[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""pre_self_attention_layer_norm"""][
"""scale"""
]
# Encoder-Decoder-Attention
_lowerCAmelCase : List[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""encoder_decoder_attention"""]
_lowerCAmelCase : List[str] = tax_enc_dec_attention_module["""key"""]["""kernel"""]
_lowerCAmelCase : List[Any] = tax_enc_dec_attention_module["""out"""]["""kernel"""]
_lowerCAmelCase : List[str] = tax_enc_dec_attention_module["""query"""]["""kernel"""]
_lowerCAmelCase : Dict = tax_enc_dec_attention_module["""value"""]["""kernel"""]
# Layer Normalization
_lowerCAmelCase : Any = tax_model["""target"""]["""decoder"""][layer_name]["""pre_cross_attention_layer_norm"""]["""scale"""]
# MLP
if split_mlp_wi:
_lowerCAmelCase : Any = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
_lowerCAmelCase : List[str] = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
_lowerCAmelCase : Any = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
_lowerCAmelCase : Optional[int] = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
_lowerCAmelCase : Optional[int] = tax_model["""target"""]["""decoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
_lowerCAmelCase : str = flax_model.params["""decoder"""]["""block"""][str(_lowerCamelCase )]["""layer"""]
_lowerCAmelCase : int = tax_attention_key
_lowerCAmelCase : List[str] = tax_attention_out
_lowerCAmelCase : Optional[Any] = tax_attention_query
_lowerCAmelCase : Dict = tax_attention_value
_lowerCAmelCase : str = tax_pre_attention_layer_norm
_lowerCAmelCase : List[Any] = tax_enc_dec_attention_key
_lowerCAmelCase : List[Any] = tax_enc_dec_attention_out
_lowerCAmelCase : Tuple = tax_enc_dec_attention_query
_lowerCAmelCase : Any = tax_enc_dec_attention_value
_lowerCAmelCase : Dict = tax_cross_layer_norm
if split_mlp_wi:
_lowerCAmelCase : Dict = tax_mlp_wi_a
_lowerCAmelCase : int = tax_mlp_wi_a
else:
_lowerCAmelCase : Optional[int] = tax_mlp_wi
_lowerCAmelCase : Dict = tax_mlp_wo
        _lowerCAmelCase : List[Any] = tax_mlp_layer_norm
_lowerCAmelCase : Optional[Any] = flax_model_decoder_layer_block
# Decoder Normalization
_lowerCAmelCase : Any = tax_model["""target"""]["""decoder"""]["""decoder_norm"""]["""scale"""]
    _lowerCAmelCase : List[str] = tax_decoder_norm
# Only for layer 0:
_lowerCAmelCase : Optional[int] = tax_model["""target"""]["""decoder"""]["""relpos_bias"""]["""rel_embedding"""].T
_lowerCAmelCase : Union[str, Any] = tax_decoder_rel_embedding
# Token Embeddings
_lowerCAmelCase : Optional[int] = tax_model["""target"""]["""token_embedder"""]["""embedding"""]
    _lowerCAmelCase : Optional[int] = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
_lowerCAmelCase : Tuple = tax_model["""target"""]["""decoder"""]["""logits_dense"""]["""kernel"""]
flax_model.save_pretrained(_lowerCamelCase )
print("""T5X Model was sucessfully converted!""" )
if __name__ == "__main__":
_a : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
_a : List[str] = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
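
# Example invocation (paths and config name are illustrative):
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_dir \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path ./flax_model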
| 126 | """simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512, pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=True if self.has_pre_transformation else output_hidden_states, return_dict=return_dict, )
        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
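
# Minimal usage sketch (the checkpoint name is illustrative, not from this file):
#   config = RobertaSeriesConfig.from_pretrained("some/alt-text-encoder")
#   model = RobertaSeriesModelWithTransformation.from_pretrained("some/alt-text-encoder", config=config)
#   text_embeddings = model(input_ids=ids, attention_mask=mask).projection_state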
| 126 | 1 |
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None):
    """Download a translation dataset with the `datasets` package and write each split to {split}.source / {split}.target files."""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")

    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
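
# Example invocation (language pair and output dir are illustrative):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir ./wmt16-ro-en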
| 166 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
"processing_layoutlmv2": ["LayoutLMv2Processor"],
"tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["LayoutLMv2FeatureExtractor"]
__UpperCAmelCase = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
"LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMv2ForQuestionAnswering",
"LayoutLMv2ForSequenceClassification",
"LayoutLMv2ForTokenClassification",
"LayoutLMv2Layer",
"LayoutLMv2Model",
"LayoutLMv2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
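
# `_LazyModule` defers the real submodule imports until an attribute is first
# accessed, so importing the package stays cheap even when optional backends
# (tokenizers, vision, torch) are installed.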
| 299 | 0 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class NewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE ( self : str) ->Union[str, Any]:
'''simple docstring'''
A__ = '''bert-base-cased'''
A__ = AutoConfig.from_pretrained(UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)
A__ = TFAutoModel.from_pretrained(UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]:
'''simple docstring'''
A__ = '''bert-base-cased'''
A__ = AutoConfig.from_pretrained(UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)
A__ = TFAutoModelForPreTraining.from_pretrained(UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = AutoConfig.from_pretrained(UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)
A__ = TFAutoModelForCausalLM.from_pretrained(UpperCAmelCase__)
A__ , A__ = TFAutoModelForCausalLM.from_pretrained(UpperCAmelCase__ , output_loading_info=UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)
@slow
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->str:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = AutoConfig.from_pretrained(UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)
A__ = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)
@slow
def SCREAMING_SNAKE_CASE ( self : Dict) ->List[str]:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = AutoConfig.from_pretrained(UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)
A__ = TFAutoModelForMaskedLM.from_pretrained(UpperCAmelCase__)
A__ , A__ = TFAutoModelForMaskedLM.from_pretrained(UpperCAmelCase__ , output_loading_info=UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)
@slow
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Tuple:
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = AutoConfig.from_pretrained(UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)
A__ = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCAmelCase__)
A__ , A__ = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCAmelCase__ , output_loading_info=UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)
@slow
def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
A__ = AutoConfig.from_pretrained(UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)
A__ = TFAutoModelForSequenceClassification.from_pretrained(UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)
@slow
def SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
A__ = AutoConfig.from_pretrained(UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)
A__ = TFAutoModelForQuestionAnswering.from_pretrained(UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)
@slow
@require_tensorflow_probability
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->int:
'''simple docstring'''
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
A__ = AutoConfig.from_pretrained(UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)
A__ = TFAutoModelForTableQuestionAnswering.from_pretrained(UpperCAmelCase__)
A__ , A__ = TFAutoModelForTableQuestionAnswering.from_pretrained(
UpperCAmelCase__ , output_loading_info=UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Dict) ->str:
'''simple docstring'''
A__ = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase__)
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)
self.assertEqual(model.num_parameters() , 14_410)
self.assertEqual(model.num_parameters(only_trainable=UpperCAmelCase__) , 14_410)
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
'''simple docstring'''
A__ = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase__)
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)
self.assertEqual(model.num_parameters() , 14_410)
self.assertEqual(model.num_parameters(only_trainable=UpperCAmelCase__) , 14_410)
def SCREAMING_SNAKE_CASE ( self : Any) ->Dict:
'''simple docstring'''
A__ = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''')
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)
A__ = copy.deepcopy(model.config)
A__ = ['''FunnelBaseModel''']
A__ = TFAutoModel.from_config(UpperCAmelCase__)
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(UpperCAmelCase__)
A__ = TFAutoModel.from_pretrained(UpperCAmelCase__)
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : str) ->Dict:
'''simple docstring'''
try:
AutoConfig.register('''new-model''' , UpperCAmelCase__)
            auto_classes = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__):
# Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, NewModel)
                    auto_class.register(NewModelConfig, NewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)
# Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, NewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, NewModel)
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def SCREAMING_SNAKE_CASE ( self : List[str]) ->int:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase__ , '''bert-base is not a local folder and is not a valid model identifier'''):
A__ = TFAutoModel.from_pretrained('''bert-base''')
def SCREAMING_SNAKE_CASE ( self : str) ->Tuple:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase__ , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'''):
A__ = TFAutoModel.from_pretrained(UpperCAmelCase__ , revision='''aaaaaa''')
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Tuple:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase__ , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ):
A__ = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''')
def SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]:
'''simple docstring'''
with self.assertRaisesRegex(UpperCAmelCase__ , '''Use `from_pt=True` to load this model'''):
A__ = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''')
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple:
'''simple docstring'''
A__ = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''')
with RequestCounter() as counter:
A__ = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''')
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
# With a sharded checkpoint
        model = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            model = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
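
# The RequestCounter assertions above verify that re-loading a cached checkpoint
# performs a single HEAD request (to validate the cached revision) and no GET or
# other requests.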
| 231 |
import argparse
from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    tax_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
A__ = '''SelfAttention'''
if config.model_type == "longt5" and config.encoder_attention_type == "local":
A__ = '''LocalSelfAttention'''
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
A__ = '''TransientGlobalSelfAttention'''
else:
raise ValueError(
'''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'''
''' attribute with a value from [\'local\', \'transient-global].''' )
# Encoder
for layer_index in range(config.num_layers ):
A__ = f"""layers_{str(lowercase_ )}"""
# Self-Attention
A__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
A__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
A__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
A__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
A__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
A__ = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
A__ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
A__ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
A__ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
A__ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
A__ = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
A__ = flax_model.params['''encoder''']['''block'''][str(lowercase_ )]['''layer''']
A__ = tax_attention_key
A__ = tax_attention_out
A__ = tax_attention_query
A__ = tax_attention_value
A__ = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
A__ = tax_global_layer_norm
if split_mlp_wi:
A__ = tax_mlp_wi_a
A__ = tax_mlp_wi_a
else:
A__ = tax_mlp_wi
A__ = tax_mlp_wo
A__ = tax_mlp_layer_norm
A__ = flax_model_encoder_layer_block
# Only for layer 0:
A__ = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
A__ = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
A__ = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
A__ = tax_encoder_global_rel_embedding
# Assigning
A__ = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
A__ = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
A__ = f"""layers_{str(lowercase_ )}"""
# Self-Attention
A__ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
A__ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
A__ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
A__ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
A__ = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
'''scale'''
]
# Encoder-Decoder-Attention
A__ = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
A__ = tax_enc_dec_attention_module['''key''']['''kernel''']
A__ = tax_enc_dec_attention_module['''out''']['''kernel''']
A__ = tax_enc_dec_attention_module['''query''']['''kernel''']
A__ = tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
A__ = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
A__ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
A__ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
A__ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
A__ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
A__ = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
A__ = flax_model.params['''decoder''']['''block'''][str(lowercase_ )]['''layer''']
A__ = tax_attention_key
A__ = tax_attention_out
A__ = tax_attention_query
A__ = tax_attention_value
A__ = tax_pre_attention_layer_norm
A__ = tax_enc_dec_attention_key
A__ = tax_enc_dec_attention_out
A__ = tax_enc_dec_attention_query
A__ = tax_enc_dec_attention_value
A__ = tax_cross_layer_norm
if split_mlp_wi:
A__ = tax_mlp_wi_a
A__ = tax_mlp_wi_a
else:
A__ = tax_mlp_wi
A__ = tax_mlp_wo
        A__ = tax_mlp_layer_norm
A__ = flax_model_decoder_layer_block
# Decoder Normalization
A__ = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
    A__ = tax_decoder_norm
# Only for layer 0:
A__ = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
A__ = tax_decoder_rel_embedding
# Token Embeddings
A__ = tax_model['''target''']['''token_embedder''']['''embedding''']
    A__ = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
A__ = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
flax_model.save_pretrained(lowercase_ )
    print('''T5X Model was successfully converted!''')
if __name__ == "__main__":
_lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path the T5X checkpoint."""
)
parser.add_argument("""--config_name""", default=None, type=str, required=True, help="""Config name of LongT5/T5 model.""")
parser.add_argument(
"""--flax_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output FLAX model."""
)
_lowerCamelCase : Tuple = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 231 | 1 |
"""simple docstring"""
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` occurs in `document`."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """idf = log10(n / df); with smoothing, 1 + log10(n / (1 + df))."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: float, idf: float) -> float:
    return round(tf * idf, 3)
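
# Worked example (the tiny corpus is illustrative): with 3 documents of which 2
# contain "cat", idf = log10(3 / 2) = 0.176, and a term frequency of 2 gives
# tf_idf(2, 0.176) == 0.352.
#
#   corpus = "the cat sat\nthe dog ran\na cat and a cat"
#   df, n = document_frequency("cat", corpus)                  # (2, 3)
#   score = tf_idf(term_frequency("cat", "a cat and a cat"),
#                  inverse_document_frequency(df, n))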
| 316 |
from torch import nn
class ClassificationHead(nn.Module):
    """A single linear layer mapping hidden states to class logits."""

    def __init__(self, class_size: int, embed_size: int):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
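
# Minimal usage sketch (sizes are illustrative):
#   head = ClassificationHead(class_size=5, embed_size=768)
#   logits = head(torch.randn(2, 768))  # -> shape (2, 5)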
| 230 | 0 |
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""
if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter=" "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s):
            return list(s)

        def process_list(self, inp):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
    cer_transform = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
    cer_transform = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
_KWARGS_DESCRIPTION = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
'https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates',
] , )
    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
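
# Worked example: prediction "hxllo" vs reference "hello" has one substituted
# character over five reference characters, so the CER is 1 / 5 = 0.2.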
| 274 | '''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")
    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler
    def _get_train_sampler(self):
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )
    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits
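
    # label_smoothed_nll_loss (imported in __init__) expects log-probabilities:
    # it mixes the gold-token NLL with a uniform distribution over the vocabulary,
    # which is why the smoothed branch applies log_softmax first.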
    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
    def prediction_step(self, model, inputs, prediction_loss_only, ignore_keys=None):
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
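
    # Example: a (2, 3) tensor of generated ids padded to max_length=5 becomes a
    # (2, 5) tensor whose last two columns hold pad_token_id.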
| 274 | 1 |
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)
        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)
    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)

    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)
        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)

    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))
        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))
        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))
        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))

    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # Now test with buffers included in the offload
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict(), offload_buffers=True
        )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
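
# Usage sketch (editor's addition, not part of the original test file). The same
# public API exercised above can run a model with CPU-offloaded weights outside
# of a test; the module and device choice here are illustrative only.
#
#     import torch
#     from accelerate.hooks import attach_align_device_hook, remove_hook_from_submodules
#
#     model = ModelForTest()
#     device = 0 if torch.cuda.is_available() else "cpu"
#     attach_align_device_hook(model, execution_device=device, offload=True)
#     output = model(torch.randn(2, 3))  # weights are streamed to `device` for the forward pass
#     remove_hook_from_submodules(model)  # reloads the weights back onto CPU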
| 93 |
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first `n` lines of each file in `src_dir` to `dest_dir`."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
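
# Example invocation (editor's addition; paths are hypothetical):
#     python minify.py data/full/ data/mini/ 16
# copies the first 16 lines of every file in data/full/ into data/mini/.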
| 254 | 0 |
import collections
import inspect
import unittest

from transformers import Swinv2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import Swinv2ForImageClassification, Swinv2ForMaskedImageModeling, Swinv2Model
    from transformers.models.swinv2.modeling_swinv2 import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor

class Swinv2ModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return Swinv2Config(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride)

    def create_and_check_model(self, config, pixel_values, labels):
        model = Swinv2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
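    # Worked example (editor's addition) with the defaults above: image_size=32 and
    # patch_size=2 give (32 // 2) ** 2 = 256 patches; with len(depths) == 3 stages the
    # sequence is downsampled by 4 ** 2 = 16, so expected_seq_len = 256 // 16 = 16 and
    # expected_dim = 16 * 2 ** 2 = 64.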

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = Swinv2ForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )
        # test greyscale images
        config.num_channels = 1
        model = Swinv2ForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = Swinv2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

@require_torch
class Swinv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (Swinv2Model, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": Swinv2Model, "image-classification": Swinv2ForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = Swinv2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Swinv2Config, embed_dim=37)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)
        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
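    # Shape intuition (editor's addition, using the tester defaults): hidden_states[0]
    # is (batch, 256, 16) while reshaped_hidden_states[0] is (batch, 16, 16, 16); the
    # view/permute above flattens the spatial grid back to (batch, 256, 16) so both
    # layouts are checked against the same [num_patches, embed_dim] expectation.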

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = Swinv2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

@require_vision
@require_torch
class Swinv2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 232 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)

class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, pad_token="<pad>", eos_token="</s>", unk_token="<unk>", mask_token="<mask_2>", mask_token_sent="<mask_1>", additional_special_tokens=None, offset=103, sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, pad_token=pad_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})
        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
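    # Id layout (editor's addition): with the default offset of 103, ids 0 and 1 are
    # reserved for <pad> and </s>, ids 2 and 3 for the two mask tokens, and
    # <unk_2> ... <unk_102> fill the remaining reserved slots; any other id maps to
    # the SentencePiece piece at (id - offset), hence vocab_size = len(sp_model) + offset.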

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        """Just EOS"""
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
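    # Example (editor's addition): with eos_token_id == 1,
    # build_inputs_with_special_tokens([5, 6, 7]) returns [5, 6, 7, 1]; Pegasus adds
    # no BOS token, only a trailing EOS.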

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 232 | 1 |
"""Approximate sin and cos with truncated Maclaurin series."""
from math import factorial, pi


def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """
    Approximate sin(theta) with its Maclaurin series:
    sin(theta) = sum((-1)**r * theta**(2r + 1) / (2r + 1)! for r >= 0)
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    # Reduce theta into [0, 2*pi) so the truncated series converges quickly
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy))


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """
    Approximate cos(theta) with its Maclaurin series:
    cos(theta) = sum((-1)**r * theta**(2r) / (2r)! for r >= 0)
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    # Reduce theta into [0, 2*pi) so the truncated series converges quickly
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
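
    # Quick numerical check (editor's addition, not in the original file): after the
    # range reduction above, the 30-term series should match the standard library to
    # within floating-point noise.
    import math

    assert abs(maclaurin_sin(10) - math.sin(10)) < 1e-9
    assert abs(maclaurin_cos(10) - math.cos(10)) < 1e-9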
| 75 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path

import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized

from transformers import AutoProcessor
from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available

from ..wav2vec2.test_feature_extraction_wav2vec2 import floats_list


if is_pyctcdecode_available():
    from huggingface_hub import snapshot_download
    from pyctcdecode import BeamSearchDecoderCTC

    from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
    from transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm import Wav2Vec2DecoderWithLMOutput

if is_torch_available():
    from transformers import Wav2Vec2ForCTC

@require_pyctcdecode
class Wav2Vec2ProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16_000,
            "return_attention_mask": False,
            "do_normalize": True,
        }
        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")
        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return Wav2Vec2CTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return Wav2Vec2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()
        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        processor.save_pretrained(self.tmpdirname)
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(self.tmpdirname)
        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer)
        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor)
        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)

    def test_save_load_pretrained_additional_features(self):
        processor = Wav2Vec2ProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)
        # make sure that error is thrown when decoder alphabet doesn't match
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )
        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)

    def test_load_decoder_tokenizer_mismatch_content(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            Wav2Vec2ProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        raw_speech = floats_list((3, 1000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)

    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        logits = self._get_dummy_logits(shape=(10, 16), seed=13)
        decoded_processor = processor.decode(logits)
        decoded_decoder = decoder.decode_beams(logits)[0]
        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)
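    # Note (editor's addition): in pyctcdecode each beam is a tuple whose first entry
    # is the transcript and whose last two entries are the logit score and the
    # LM-fused score, which is what the [0]/[-2]/[-1] indexing above relies on.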

    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        logits = self._get_dummy_logits()
        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)
        logits_list = list(logits)
        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)
        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])
        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)

    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        logits = self._get_dummy_logits()
        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0
        decoded_processor_out = processor.batch_decode(
            logits, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp
        )
        decoded_processor = decoded_processor_out.text
        logits_list = list(logits)
        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool, logits_list, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp
            )
        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]
        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor)
        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))
        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))

    def test_decoder_with_params_of_lm(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        logits = self._get_dummy_logits()
        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True
        decoded_processor_out = processor.batch_decode(
            logits, alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary
        )
        decoded_processor = decoded_processor_out.text
        logits_list = list(logits)
        decoder.reset_params(alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary)
        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(pool, logits_list)
        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor)
        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, True)

    def test_decoder_download_ignores_files(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()
        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ["alphabet.json", "language_model"]
        downloaded_decoder_files.sort()
        expected_decoder_files.sort()
        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)

    def test_decoder_local_files(self):
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm")
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(local_dir)
        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()
        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_cached_dir)
        local_decoder_files.sort()
        expected_decoder_files.sort()
        # test that both decoder from hub and local files in cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)

    def test_processor_from_auto_processor(self):
        processor_wav2vec2 = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")
        raw_speech = floats_list((3, 1000))
        input_wav2vec2 = processor_wav2vec2(raw_speech, return_tensors="np")
        input_auto = processor_auto(raw_speech, return_tensors="np")
        for key in input_wav2vec2.keys():
            self.assertAlmostEqual(input_wav2vec2[key].sum(), input_auto[key].sum(), delta=1e-2)
        logits = self._get_dummy_logits()
        decoded_wav2vec2 = processor_wav2vec2.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)
        self.assertListEqual(decoded_wav2vec2.text, decoded_auto.text)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )

    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
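    # Offset-to-time arithmetic (editor's addition): Wav2Vec2 emits one logit frame per
    # `inputs_to_logits_ratio` input samples (320 for the base checkpoints), so at a
    # 16 kHz sampling rate one offset unit is 320 / 16000 = 0.02 s; a start_offset of 12
    # therefore maps to 0.24 s. The integration test below applies exactly this scaling.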

    def test_offsets_integration_fast(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]
        outputs = processor.decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))
        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])

    def test_offsets_integration_fast_batch(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()
        outputs = processor.batch_decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))
        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])

    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000))
        ds_iter = iter(ds)
        sample = next(ds_iter)
        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values
        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()
        output = processor.decode(logits[0], output_word_offsets=True)
        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]
        EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"
        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)
        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))
        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on
        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
| 75 | 1 |
from __future__ import annotations

import random
import unittest

from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFTransfoXLForSequenceClassification,
        TFTransfoXLLMHeadModel,
        TFTransfoXLModel,
    )

class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01
    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)
    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}

        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.assertEqual = self.parent.assertEqual  # noqa: E731 (alias kept local for brevity)
        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1, "labels": lm_labels}

        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
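

# A minimal usage sketch (added for illustration, not part of the original tests):
# Transfo-XL is stateful across segments, so the `mems` returned for one chunk are fed
# back in with the next chunk instead of re-encoding the full history. The tiny config
# below mirrors the tester defaults above and is an assumption, not a released checkpoint.
def _demo_transfo_xl_mems():
    config = TransfoXLConfig(
        vocab_size=99, cutoffs=[10, 50, 80], d_model=32, d_embed=32, n_head=4,
        d_head=8, d_inner=128, div_val=2, n_layer=2, mem_len=30,
    )
    model = TFTransfoXLModel(config)
    chunk_1 = ids_tensor([2, 7], config.vocab_size)
    chunk_2 = ids_tensor([2, 7], config.vocab_size)
    outputs = model(chunk_1)  # first segment: mems start empty
    # second segment reuses the cached hidden states; each mem is (mem_len, batch, d_model)
    outputs = model({"input_ids": chunk_2, "mems": outputs.mems})
    return outputs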
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
    def test_xla_mode(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
        # fmt: off
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]], dtype=tf.int32)  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
| 360 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
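

# Quick usage note (added for illustration): to_2tuple normalizes scalar image/patch
# sizes so the shape math below can always index two dimensions, e.g.
#   to_2tuple(224) == (224, 224)     while     to_2tuple((224, 196)) == (224, 196)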
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass
    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
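
    # Worked example (illustrative, with assumed ViT-base style numbers rather than the
    # tiny test configs): a 224x224 image split into 16x16 patches gives
    # (224 // 16) * (224 // 16) = 196 patches, so the attention sequence length checked
    # above is 196 + 1 = 197 once the [CLS] token is added.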
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
    def test_vision_text_dual_encoder_model(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs)

    def test_model_from_pretrained_configs(self):
        inputs = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs)

    def test_save_load(self):
        inputs = self.prepare_config_and_inputs()
        self.check_save_load(**inputs)

    def test_vision_text_output_attention(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
        # just reinitialize it.
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
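
        # Illustrative follow-up (not part of the original assertion set): the image-text
        # logits can be turned into per-image probabilities over the candidate captions,
        # assuming `import tensorflow as tf` is available in this test module:
        #     probs = tf.nn.softmax(outputs.logits_per_image, axis=-1)  # (num_images, num_texts)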
| 337 | 0 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_tensor = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_tensor, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
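
# Example invocation (illustrative; the flags are the ones defined in main() above, while
# the script filename and paths are placeholders):
#
#   python convert_bert_pytorch_checkpoint_to_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt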
| 216 |
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self):
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self):
        return self.head == self.tail

    def push(self, data: Any):
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self):
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self):
        return self.tail - self.head

    def print_queue(self):
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])
class MyNode:
    def __init__(self, data: Any):
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self):
        return self.data

    def get_left(self):
        return self.left

    def get_right(self):
        return self.right

    def get_height(self):
        return self.height

    def set_data(self, data: Any):
        self.data = data

    def set_left(self, node: MyNode | None):
        self.left = node

    def set_right(self, node: MyNode | None):
        self.right = node

    def set_height(self, height: int):
        self.height = height
def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b
def left_rotation(node: MyNode) -> MyNode:
    print("left rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def right_rotation(node: MyNode) -> MyNode:
    print("right rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    height = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(height)
    return node
def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))

    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root
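

# A small property check (added for illustration, not part of the original module): every
# node of a valid AVL tree has a balance factor in {-1, 0, 1}. Only the accessors defined
# above are used.
def is_balanced(node: MyNode | None) -> bool:
    if node is None:
        return True
    balance = get_height(node.get_left()) - get_height(node.get_right())
    return abs(balance) <= 1 and is_balanced(node.get_left()) and is_balanced(node.get_right())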
class AVLtree:
    def __init__(self):
        self.root: MyNode | None = None

    def get_height(self):
        return get_height(self.root)

    def insert(self, data: Any):
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any):
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self):  # a level traversale, gives a more intuitive look on the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, i) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test() -> None:
    import doctest

    doctest.testmod()
if __name__ == "__main__":
_test()
    t = AVLtree()
    lst = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 216 | 1 |
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
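

# Example invocation (illustrative; the flags match parse_args above, the script name and
# values are placeholders):
#
#   python retrieve_images.py \
#       --class_prompt "photo of a dog" \
#       --class_data_dir ./class_data/dog \
#       --num_class_images 200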
if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 364 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])
    def test_multibyte_chars(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
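
    # How the ids above arise (added note, not part of the original test): ByT5 reserves
    # ids 0-2 for pad/eos/unk, so every UTF-8 byte is shifted up by 3. "U" is byte 85 and
    # becomes id 88, and the euro sign's three UTF-8 bytes 226/130/172 become 229/133/175,
    # exactly the values asserted above:
    #     list("Unicode".encode("utf-8"))             ->  [85, 110, 105, 99, 111, 100, 101]
    #     [b + 3 for b in "Unicode".encode("utf-8")]  ->  [88, 113, 108, 102, 114, 103, 104]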
    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])
    def test_save_and_load_tokenizer(self):
        # safety check: make sure the default model_max_length is not the sentinel we use below
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == "")
    # tokenizer can be instantiated without any pretrained files, so no pretrained list to test
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have a vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so this test is unnecessary
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=False, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
    # this tokenizer has no vocabulary, so the common ids-setter test is overridden here
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
| 254 | 0 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCAmelCase__ : List[str] = "src/diffusers"
UpperCAmelCase__ : str = "."
# This is to make sure the diffusers module imported is the one in the repo.
UpperCAmelCase__ : Tuple = importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
UpperCAmelCase__ : List[str] = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`, given as a dotted module path."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
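
# Example of the comment these patterns are designed to match (illustrative; the class
# names are hypothetical):
#
#   # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
#
# Group 2 of `_re_copy_warning` captures the fully qualified object name, and the optional
# `with A->B` suffix is split into replacement pairs by `_re_replace_pattern`.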
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Applies a black formatting pass to `code`, wrapping indented snippets in a dummy class."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """Check whether code commented as a copy in `filename` matches the original; optionally rewrite it."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    # `DIFFUSERS_PATH` is the package root defined earlier in this utility.
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
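# --- Illustrative addition (not part of the original utility) --------------------
# A small, self-contained sketch of what the two regexes above extract from a
# `# Copied from` comment; the module path in the sample comment is hypothetical.
def _demo_copy_comment_parsing():
    comment = "# Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock"
    _, object_name, replace_pattern = _re_copy_warning.search(comment).groups()
    assert object_name == "models.attention.BasicTransformerBlock"
    obj1, obj2, _ = _re_replace_pattern.search(replace_pattern.replace("with", "")).groups()
    assert (obj1, obj2) == ("BasicTransformerBlock", "MyBlock")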
| 121 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/maskformer-swin-base-ade": (
"https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
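# --- Illustrative addition (not part of the original module) ---------------------
# Minimal usage sketch: the default construction falls back to a Swin backbone and
# a DETR decoder, and the nested configs survive a round trip through plain dicts
# thanks to the isinstance checks in `__init__`.
def _demo_maskformer_config():
    config = MaskFormerConfig()
    assert config.to_dict()["model_type"] == "maskformer"
    restored = MaskFormerConfig(
        backbone_config=config.backbone_config.to_dict(),
        decoder_config=config.decoder_config.to_dict(),
    )
    assert restored.backbone_config.model_type == "swin"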
| 114 | 0 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    """simple docstring"""

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.Tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )
            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"
        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")
        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. "
                f"Expected at least {pretrained_model_path + '_0'}."
            )
        return cls(controlnets)
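# --- Illustrative addition (not part of the original module) ---------------------
# Hedged usage sketch; the repo ids below are placeholders and loading real
# checkpoints requires a download, so the calls are left commented out:
#
#   controlnet_a = ControlNetModel.from_pretrained("<repo-id-of-first-controlnet>")
#   controlnet_b = ControlNetModel.from_pretrained("<repo-id-of-second-controlnet>")
#   multi = MultiControlNetModel([controlnet_a, controlnet_b])
#   multi.save_pretrained("./multi_controlnet")   # writes ./multi_controlnet, ./multi_controlnet_1, ...
#   multi = MultiControlNetModel.from_pretrained("./multi_controlnet")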
| 357 |
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
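# --- Illustrative addition (not part of the original module) ---------------------
# The dispatch idiom used above: each command attaches a subparser and stores a
# factory on `args.func` via `set_defaults`. A standalone sketch with plain
# argparse (left commented out so the CLI entry point itself is unaffected):
#
#   parser = ArgumentParser("demo")
#   sub = parser.add_subparsers()
#   hello = sub.add_parser("hello")
#   hello.set_defaults(func=lambda args: print("hello"))
#   args = parser.parse_args(["hello"])
#   args.func(args)   # prints "hello"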
| 263 | 0 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__A ="\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
__A ="\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
__A ="\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 1_00
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)
    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref
    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref
    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter
    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)
    # ADDITION
    addgramcounter = set(cgrams) - set(sgrams)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgrams)
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)
    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)
    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)
    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)
    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 1_00 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class WikiSplit(datasets.Metric):
    '''simple docstring'''

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
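# --- Illustrative addition (not part of the original metric file) ----------------
# The three component scores can also be called directly, without going through
# `datasets.load_metric`:
def _demo_wiki_split():
    sources = ["About 95 species are currently accepted ."]
    predictions = ["About 95 you now get in ."]
    references = [["About 95 species are currently known ."]]
    print(compute_sari(sources=sources, predictions=predictions, references=references))
    print(compute_sacrebleu(predictions=predictions, references=references))
    print(compute_em(predictions=predictions, references=references))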
| 226 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 226 | 1 |
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    """simple docstring"""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")
        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: int = 32,
        crop_n_points_downscale_factor: int = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )
        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()
        model_outputs = self.model(**model_inputs)
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )
        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)
        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 367 |
"""simple docstring"""
from __future__ import annotations
def minimum_cost_path(matrix: list[list[int]]) -> int:
    """Return the minimum cost of a top-left to bottom-right path that moves only
    right or down; the input matrix is updated in place.

    >>> minimum_cost_path([[2, 1], [3, 1], [4, 2]])
    6
    >>> minimum_cost_path([[2, 1, 4], [2, 1, 3], [3, 2, 1]])
    7
    """
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
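# --- Illustrative addition (not part of the original file) -----------------------
# Worked example: with moves restricted to right/down, the cheapest path through
# [[1, 3, 1], [1, 5, 1], [4, 2, 1]] is 1 -> 3 -> 1 -> 1 -> 1, costing 7. Note the
# function mutates its argument in place.
assert minimum_cost_path([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7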
if __name__ == "__main__":
import doctest
doctest.testmod()
| 150 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class Node:
    """simple docstring"""

    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    """simple docstring"""

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
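# --- Illustrative addition (not part of the original file) -----------------------
# Usage sketch: sum every node reachable from the root via the iterator.
def _demo_node_sum() -> None:
    root = Node(10)
    root.left = Node(5)
    root.right = Node(-3)
    assert next(iter(BinaryTreeNodeSum(root))) == 12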
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 126 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    """simple docstring"""

    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 126 | 1 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    '''simple docstring'''

    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )
        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)
        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()
        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()
        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)
        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()
        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()
        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()
        inputs = {"input_ids": input_ids_2, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()
        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True
        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    '''simple docstring'''

    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
        # fmt: off
_A = tf.convert_to_tensor([[33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_A = [33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0,33,1,1_857,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,28,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_A = model.generate(_UpperCAmelCase , max_length=200 , do_sample=_UpperCAmelCase )
self.assertListEqual(output_ids[0].numpy().tolist() , _UpperCAmelCase )
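# --- Illustrative addition (not part of the original tests) ----------------------
# The `mems` round-trips exercised above are TransfoXL's segment-level recurrence:
# a second forward pass consumes the memories returned by the first, e.g.
#
#   hidden, mems = model(first_segment_ids).to_tuple()
#   hidden, mems = model({"input_ids": next_segment_ids, "mems": mems}).to_tuple()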
| 271 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    '''simple docstring'''

    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1_000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2_540_529) < 10
| 271 | 1 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Return True if `number` is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Return the reduced sum of the three fractions x, y and z."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    """Collect all unique reduced fractions z obtainable from pairs x, y for the
    exponents n in {1, 2, -1, -2}, sum them, and return numerator + denominator."""
    unique_s: set = set()
    total = Fraction(0)
    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)
    for num, den in unique_s:
        total += Fraction(num, den)
    return total.denominator + total.numerator
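# --- Illustrative addition (not part of the original file) -----------------------
# Quick check of `add_three`: 1/2 + 1/3 + 1/6 reduces to 1/1.
assert add_three(1, 2, 1, 3, 1, 6) == (1, 1)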
if __name__ == "__main__":
print(f"""{solution() = }""")
| 231 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 231 | 1 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        '''simple docstring'''
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        '''simple docstring'''
        return len(self.vocab)

    def get_vocab(self):
        '''simple docstring'''
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        '''Tokenize a string into a list of single characters.'''
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        '''simple docstring'''
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        '''simple docstring'''
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
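
# Editor's sketch (added): a minimal round-trip with the character tokenizer
# above, assuming the checkpoint "alibaba-damo/mgp-str-base" referenced in
# PRETRAINED_VOCAB_FILES_MAP is reachable (commented out because it needs a
# download):
#
#     from transformers import MgpstrTokenizer
#
#     tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
#     ids = tokenizer("hello")["input_ids"]          # one id per character
#     print(tokenizer.convert_ids_to_tokens(ids))    # -> ["h", "e", "l", "l", "o"]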
| 357 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        '''simple docstring'''
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        '''simple docstring'''
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        '''simple docstring'''
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.perceiver_tokenizer
lowercase__ : Union[str, Any]= "Unicode €."
lowercase__ : Tuple= tokenizer(snake_case__ )
lowercase__ : Dict= [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded["input_ids"] , snake_case__ )
# decoding
lowercase__ : List[str]= tokenizer.decode(snake_case__ )
self.assertEqual(snake_case__ , "[CLS]Unicode €.[SEP]" )
lowercase__ : List[str]= tokenizer("e è é ê ë" )
lowercase__ : int= [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded["input_ids"] , snake_case__ )
# decoding
lowercase__ : Any= tokenizer.decode(snake_case__ )
self.assertEqual(snake_case__ , "[CLS]e è é ê ë[SEP]" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "[CLS]e è é ê ë[SEP]" )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= self.perceiver_tokenizer
lowercase__ : Tuple= ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
lowercase__ : List[Any]= [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
lowercase__ : Dict= tokenizer(snake_case__ , padding=snake_case__ , return_tensors=snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
if FRAMEWORK != "jax":
lowercase__ : List[Any]= list(batch.input_ids.numpy()[0] )
else:
lowercase__ : str= list(batch.input_ids.tolist()[0] )
self.assertListEqual(snake_case__ , snake_case__ )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.perceiver_tokenizer
lowercase__ : int= ["A long paragraph for summarization.", "Another paragraph for summarization."]
lowercase__ : Union[str, Any]= tokenizer(snake_case__ , padding=snake_case__ , return_tensors=snake_case__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids" , snake_case__ )
self.assertIn("attention_mask" , snake_case__ )
self.assertNotIn("decoder_input_ids" , snake_case__ )
self.assertNotIn("decoder_attention_mask" , snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.perceiver_tokenizer
lowercase__ : int= [
"Summary of the text.",
"Another summary.",
]
lowercase__ : List[Any]= tokenizer(
text_target=snake_case__ , max_length=32 , padding="max_length" , truncation=snake_case__ , return_tensors=snake_case__ )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
# safety check on max_len default value so we are sure the test works
lowercase__ : Union[str, Any]= self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowercase__ : List[str]= self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ : str= tempfile.mkdtemp()
lowercase__ : str= " He is very happy, UNwant\u00E9d,running"
lowercase__ : int= tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
tokenizer.save_pretrained(snake_case__ )
lowercase__ : List[str]= tokenizer.__class__.from_pretrained(snake_case__ )
lowercase__ : Tuple= after_tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
shutil.rmtree(snake_case__ )
lowercase__ : List[str]= self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ : Tuple= tempfile.mkdtemp()
lowercase__ : List[Any]= " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
lowercase__ : Tuple= tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
lowercase__ : List[str]= tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
tokenizer.save_pretrained(snake_case__ )
lowercase__ : Tuple= tokenizer.__class__.from_pretrained(snake_case__ )
lowercase__ : Tuple= after_tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowercase__ : Tuple= tokenizer.__class__.from_pretrained(snake_case__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(snake_case__ )
with open(os.path.join(snake_case__ , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
lowercase__ : Optional[Any]= json.load(snake_case__ )
with open(os.path.join(snake_case__ , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
lowercase__ : Optional[Any]= json.load(snake_case__ )
lowercase__ : List[str]= [F'''<extra_id_{i}>''' for i in range(125 )]
lowercase__ : Optional[int]= added_tokens_extra_ids + [
"an_additional_special_token"
]
lowercase__ : Union[str, Any]= added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(snake_case__ , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(snake_case__ , snake_case__ )
with open(os.path.join(snake_case__ , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(snake_case__ , snake_case__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowercase__ : Optional[int]= tokenizer_class.from_pretrained(
snake_case__ , )
self.assertIn(
"an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowercase__ : int= added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=snake_case__ )]
lowercase__ : int= tokenizer_class.from_pretrained(
snake_case__ , additional_special_tokens=snake_case__ , )
self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , "�" )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
pass
def UpperCAmelCase_ ( self ):
'''simple docstring'''
pass
def UpperCAmelCase_ ( self ):
'''simple docstring'''
pass
def UpperCAmelCase_ ( self ):
'''simple docstring'''
pass
def UpperCAmelCase_ ( self ):
'''simple docstring'''
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
lowercase__ : Optional[int]= self.get_tokenizers(fast=snake_case__ , do_lower_case=snake_case__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ : Optional[int]= ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
lowercase__ : Optional[int]= tokenizer.convert_tokens_to_string(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
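
# Editor's sketch (added): the expected ids in the tests above are consistent
# with Perceiver's byte-level scheme -- id = utf-8 byte value + 6, with ids
# 0-5 reserved for special tokens such as [CLS]=4 and [SEP]=5. A minimal check:
#
#     text = "Unicode €."
#     ids = [4] + [b + 6 for b in text.encode("utf-8")] + [5]
#     assert ids == [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
#
# This reproduces the literal list asserted in the first test; the offset of 6
# is inferred from those values, not stated in this file.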
| 150 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification
def get_swinv2_config(swinv2_name):
    """simple docstring"""
    config = Swinv2Config()
    name_split = swinv2_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key(name):
    """simple docstring"""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name
    return name
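
# Editor's note (added): a concrete example of the mapping performed above --
# the timm key "layers.0.blocks.0.attn.proj.weight" becomes
# "swinv2.encoder.layers.0.blocks.0.attention.output.dense.weight"
# ("layers" gains the "encoder." prefix, "attn.proj" is rewritten, and the
# non-head key finally gains the "swinv2." prefix).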
def convert_state_dict(orig_state_dict, model):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[:dim]
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    """simple docstring"""
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f'Saving model {swinv2_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name), organization="nandwalritik", commit_message="Add model", )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
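
# Editor's note (added): a plausible invocation of this conversion script,
# using the default checkpoint name from the argparse definition above (the
# script filename and output path are illustrative):
#
#     python convert_swinv2_checkpoint.py \
#         --swinv2_name swinv2_tiny_patch4_window8_256 \
#         --pytorch_dump_folder_path ./swinv2-tiny-patch4-window8-256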
| 274 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    """simple docstring"""
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """simple docstring"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """simple docstring"""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser()
parser.add_argument("""--model_name""" , type=__a , default="""openai-gpt""" , help="""pretrained model name""" )
parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" )
parser.add_argument("""--do_eval""" , action="""store_true""" , help="""Whether to run eval on the dev set.""" )
parser.add_argument(
"""--output_dir""" , default=__a , type=__a , required=__a , help="""The output directory where the model predictions and checkpoints will be written.""" , )
parser.add_argument("""--train_dataset""" , type=__a , default="""""" )
parser.add_argument("""--eval_dataset""" , type=__a , default="""""" )
parser.add_argument("""--seed""" , type=__a , default=4_2 )
parser.add_argument("""--num_train_epochs""" , type=__a , default=3 )
parser.add_argument("""--train_batch_size""" , type=__a , default=8 )
parser.add_argument("""--eval_batch_size""" , type=__a , default=1_6 )
parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=__a , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""" , type=__a , default=1 )
parser.add_argument(
"""--max_steps""" , default=-1 , type=__a , help=(
"""If > 0: set total number of training steps to perform. Override num_train_epochs."""
) , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=__a , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , )
parser.add_argument("""--learning_rate""" , type=__a , default=6.25E-5 )
parser.add_argument("""--warmup_steps""" , default=0 , type=__a , help="""Linear warmup over warmup_steps.""" )
parser.add_argument("""--lr_schedule""" , type=__a , default="""warmup_linear""" )
parser.add_argument("""--weight_decay""" , type=__a , default=0.01 )
parser.add_argument("""--lm_coef""" , type=__a , default=0.9 )
parser.add_argument("""--n_valid""" , type=__a , default=3_7_4 )
parser.add_argument("""--server_ip""" , type=__a , default="""""" , help="""Can be used for distant debugging.""" )
parser.add_argument("""--server_port""" , type=__a , default="""""" , help="""Can be used for distant debugging.""" )
    args = parser.parse_args()
    print(args)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__a )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError("""At least one of `do_train` or `do_eval` must be True.""" )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
logger.info("""Encoding dataset...""" )
A__ = load_rocstories_dataset(args.train_dataset )
A__ = load_rocstories_dataset(args.eval_dataset )
A__ = (train_dataset, eval_dataset)
A__ = tokenize_and_encode(__a )
# Compute the max input length for the Transformer
A__ = model.config.n_positions // 2 - 2
A__ = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
A__ = min(__a , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
A__ = pre_process_datasets(__a , __a , __a , *__a )
A__ , A__ = tensor_datasets[0], tensor_datasets[1]
A__ = TensorDataset(*__a )
A__ = RandomSampler(__a )
A__ = DataLoader(__a , sampler=__a , batch_size=args.train_batch_size )
A__ = TensorDataset(*__a )
A__ = SequentialSampler(__a )
A__ = DataLoader(__a , sampler=__a , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
A__ = args.max_steps
A__ = args.max_steps // (len(__a ) // args.gradient_accumulation_steps) + 1
else:
A__ = len(__a ) // args.gradient_accumulation_steps * args.num_train_epochs
A__ = list(model.named_parameters() )
A__ = ["""bias""", """LayerNorm.bias""", """LayerNorm.weight"""]
A__ = [
{
"""params""": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"""weight_decay""": args.weight_decay,
},
{"""params""": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], """weight_decay""": 0.0},
]
A__ = AdamW(__a , lr=args.learning_rate , eps=args.adam_epsilon )
A__ = get_linear_schedule_with_warmup(
__a , num_warmup_steps=args.warmup_steps , num_training_steps=__a )
if args.do_train:
A__ , A__ , A__ = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc="""Epoch""" ):
A__ = 0
A__ = 0
A__ = tqdm(__a , desc="""Training""" )
for step, batch in enumerate(__a ):
A__ = tuple(t.to(__a ) for t in batch )
A__ , A__ , A__ , A__ = batch
A__ = model(__a , mc_token_ids=__a , lm_labels=__a , mc_labels=__a )
A__ = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
A__ = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
A__ = """Training loss: {:.2e} lr: {:.2e}""".format(__a , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
A__ = model.module if hasattr(__a , """module""" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
A__ = os.path.join(args.output_dir , __a )
A__ = os.path.join(args.output_dir , __a )
torch.save(model_to_save.state_dict() , __a )
model_to_save.config.to_json_file(__a )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
A__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
A__ = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(__a )
if args.do_eval:
model.eval()
A__ , A__ = 0, 0
A__ , A__ = 0, 0
for batch in tqdm(__a , desc="""Evaluating""" ):
A__ = tuple(t.to(__a ) for t in batch )
A__ , A__ , A__ , A__ = batch
with torch.no_grad():
A__ , A__ , A__ , A__ = model(
__a , mc_token_ids=__a , lm_labels=__a , mc_labels=__a )
A__ = mc_logits.detach().cpu().numpy()
A__ = mc_labels.to("""cpu""" ).numpy()
A__ = accuracy(__a , __a )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
A__ = eval_loss / nb_eval_steps
A__ = eval_accuracy / nb_eval_examples
A__ = tr_loss / nb_tr_steps if args.do_train else None
A__ = {"""eval_loss""": eval_loss, """eval_accuracy""": eval_accuracy, """train_loss""": train_loss}
A__ = os.path.join(args.output_dir , """eval_results.txt""" )
with open(__a , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""" , __a , str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
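
# Editor's note (added): a plausible invocation of this fine-tuning script on
# the RocStories cloze data (file paths are illustrative; the flags match the
# argparse definitions in main()):
#
#     python run_openai_gpt.py \
#         --model_name openai-gpt \
#         --do_train --do_eval \
#         --train_dataset ./data/cloze_test_val__spring2016.csv \
#         --eval_dataset ./data/cloze_test_test__spring2016.csv \
#         --output_dir ./log \
#         --train_batch_size 16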
| 274 | 1 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    """simple docstring"""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    """simple docstring"""
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    """simple docstring"""
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    """simple docstring"""
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    """simple docstring"""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    """simple docstring"""
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    """simple docstring"""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    """simple docstring"""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    """simple docstring"""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    """simple docstring"""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    """simple docstring"""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    """simple docstring"""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    """simple docstring"""

    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    """simple docstring"""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    """simple docstring"""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    """simple docstring"""
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    """simple docstring"""
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    """simple docstring"""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    """simple docstring"""
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    """simple docstring"""

    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    """simple docstring"""
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
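
# Editor's sketch (added): how the context manager above is typically used in
# a test (a hedged illustration; `offline` is the restored name of the helper):
#
#     with offline(OfflineSimulationMode.CONNECTION_FAILS):
#         # any requests.Session().request(...) call in here raises ConnectionError
#         ...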
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    """simple docstring"""
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    """simple docstring"""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    """simple docstring"""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    """simple docstring"""
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    """simple docstring"""
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        '''simple docstring'''
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    """simple docstring"""
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    """simple docstring"""
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    """simple docstring"""
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}")
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")
    return result
def pytest_xdist_worker_id():
    """simple docstring"""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """simple docstring"""
    port = 29_500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
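
# Editor's sketch (added): typical use of the subprocess helper above -- run a
# command and capture its streams; a non-zero exit raises RuntimeError (the
# command below is illustrative):
#
#     cmd = [sys.executable, "-c", "print('hello')"]
#     result = execute_subprocess_async(cmd, env=os.environ.copy())
#     print(result.stdout)  # -> ["hello"]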
| 371 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        '''simple docstring'''
        self.model_tester = FlaxAlbertModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
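
# Editor's note (added): the @slow-decorated tests in this file are skipped by
# default; in the transformers test suite they are enabled via the RUN_SLOW
# environment flag, e.g. (the test-file path is illustrative):
#
#     RUN_SLOW=1 python -m pytest tests/models/albert/test_modeling_flax_albert.py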
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        '''simple docstring'''
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 279 | 0 |
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)
class ParallelBackendConfig:
    '''simple docstring'''

    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    '''simple docstring'''
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    '''simple docstring'''
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))
    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f'Error dividing inputs iterable among processes. '
            f'Total number of objects {len(iterable)}, '
            f'length: {sum(len(i[1]) for i in split_kwds)}')
    logger.info(
        f'Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}')
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f'Finished {num_proc} processes')
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f'Unpacked {len(mapped)} objects')
    return mapped
def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    '''simple docstring'''
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable)
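
# Editor's sketch (added): intended usage of the backend plumbing in this
# file, as suggested by the spark branch in parallel_backend() defined just
# below -- select a joblib backend for the duration of a block (a hedged
# illustration, not an official recipe; `fn`/`items`/`single_fn` are
# placeholders):
#
#     with parallel_backend("spark"):
#         mapped = parallel_map(fn, items, 2, None, True, None, single_fn)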
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    '''simple docstring'''
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
| 232 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 232 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class CodeGenConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=50_400,
        n_positions=2_048,
        n_ctx=2_048,
        n_embd=4_096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__(self : List[Any] , UpperCAmelCase_ : PretrainedConfig , UpperCAmelCase_ : str = "default" , UpperCAmelCase_ : List[PatchingSpec] = None , UpperCAmelCase_ : bool = False , ) ->Optional[int]:
'''simple docstring'''
super().__init__(UpperCAmelCase_ , task=UpperCAmelCase_ , patching_specs=UpperCAmelCase_ , use_past=UpperCAmelCase_)
if not getattr(self._config , "pad_token_id" , UpperCAmelCase_):
# TODO: how to do that better?
lowerCamelCase__: Optional[Any] =0
@property
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
lowerCamelCase__: Any =OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
if self.use_past:
self.fill_with_past_key_values_(UpperCAmelCase_ , direction="inputs")
lowerCamelCase__: Optional[Any] ={0: "batch", 1: "past_sequence + sequence"}
else:
lowerCamelCase__: Tuple ={0: "batch", 1: "sequence"}
return common_inputs
@property
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->int:
'''simple docstring'''
return self._config.n_layer
@property
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->int:
'''simple docstring'''
return self._config.n_head
def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : PreTrainedTokenizer , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional[TensorType] = None , ) ->Mapping[str, Any]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =super(UpperCAmelCase_ , self).generate_dummy_inputs(
UpperCAmelCase_ , batch_size=UpperCAmelCase_ , seq_length=UpperCAmelCase_ , is_pair=UpperCAmelCase_ , framework=UpperCAmelCase_)
        # We need to order the inputs in the way they appear in forward()
lowerCamelCase__: List[str] =OrderedDict({"input_ids": common_inputs["input_ids"]})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
else:
import torch
lowerCamelCase__: Union[str, Any] =common_inputs["input_ids"].shape
# Not using the same length for past_key_values
lowerCamelCase__: List[Any] =seqlen + 2
lowerCamelCase__: str =(
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCamelCase__: Tuple =[
(torch.zeros(UpperCAmelCase_), torch.zeros(UpperCAmelCase_)) for _ in range(self.num_layers)
]
lowerCamelCase__: str =common_inputs["attention_mask"]
if self.use_past:
lowerCamelCase__: List[Any] =ordered_inputs["attention_mask"].dtype
lowerCamelCase__: Union[str, Any] =torch.cat(
[ordered_inputs["attention_mask"], torch.ones(UpperCAmelCase_ , UpperCAmelCase_ , dtype=UpperCAmelCase_)] , dim=1)
return ordered_inputs
@property
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->int:
'''simple docstring'''
return 13
| 365 |
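# In the use_past branch above, every layer contributes a (key, value) pair of zero
# tensors shaped (batch, n_head, past_len, head_dim), and the attention mask is widened
# to cover the past positions. A standalone sketch of that shape computation (the
# function and parameter names are illustrative):
import torch

def dummy_past_key_values_sketch(batch, seqlen, n_layer, n_head, hidden_size):
    past_len = seqlen + 2  # deliberately not the same length as the input, as above
    shape = (batch, n_head, past_len, hidden_size // n_head)
    past = [(torch.zeros(shape), torch.zeros(shape)) for _ in range(n_layer)]
    # The mask must cover past + current positions.
    mask = torch.ones(batch, past_len + seqlen)
    return past, mask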
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
__A = logging.getLogger(__name__)
if __name__ == "__main__":
__A = argparse.ArgumentParser(
description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
)
parser.add_argument(
"--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
)
parser.add_argument(
"--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
)
parser.add_argument("--vocab_size", default=3_0522, type=int)
__A = parser.parse_args()
logger.info(f'Loading data from {args.data_file}')
with open(args.data_file, "rb") as fp:
__A = pickle.load(fp)
logger.info("Counting occurrences for MLM.")
__A = Counter()
for tk_ids in data:
counter.update(tk_ids)
__A = [0] * args.vocab_size
for k, v in counter.items():
__A = v
logger.info(f'Dump to {args.token_counts_dump}')
with open(args.token_counts_dump, "wb") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 273 | 0 |
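# The dumped counts are typically converted into MLM masking probabilities with a
# word2vec-style smoothing exponent, so rare tokens are masked more often than raw
# frequency alone would suggest. A hedged sketch - the exponent 0.7 and the exact
# formula are assumptions, not read from the script above:
import numpy as np

def smooth_token_probs_sketch(counts, smoothing=0.7):
    freqs = np.maximum(np.array(counts, dtype=np.float64), 1.0)  # floor at 1 so zero counts do not blow up
    probs = freqs ** -smoothing
    return probs / probs.sum()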
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE_ ( __A : int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
UpperCAmelCase_ : Dict = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
def SCREAMING_SNAKE_CASE_ ( __A : int ) -> list[int]:
"""simple docstring"""
if not isinstance(__A , __A ):
raise ValueError('n must be an integer' )
if n <= 0:
raise ValueError('n must be >= 0' )
a_ : Any = []
for num in range(len(__A ) ):
a_ : str = 0
while 2 * i * i <= odd_composites[num]:
a_ : Any = odd_composites[num] - 2 * i * i
if is_prime(__A ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(__A ) == n:
return list_nums
return []
def SCREAMING_SNAKE_CASE_ ( ) -> int:
"""simple docstring"""
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'{solution() = }')
| 32 |
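# compute_nums(1) above searches the odd composites for a counterexample to Goldbach's
# other conjecture: an odd composite that is not prime + 2*i*i for any i. A
# self-contained sketch of the per-candidate check (names are illustrative):
def violates_goldbach_other(odd_composite):
    def is_prime(n):  # trial division is enough for a sketch
        return n > 1 and all(n % d for d in range(2, int(n**0.5) + 1))

    i = 1
    while 2 * i * i < odd_composite:
        if is_prime(odd_composite - 2 * i * i):
            return False
        i += 1
    return True

assert not violates_goldbach_other(9)   # 9 = 7 + 2 * 1**2
assert violates_goldbach_other(5_777)   # the known smallest counterexample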
def __lowercase ( _UpperCamelCase = 4000000 ) ->int:
"""simple docstring"""
lowercase : int = []
lowercase , lowercase : str = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(_UpperCamelCase )
lowercase , lowercase : Dict = b, a + b
return sum(_UpperCamelCase )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 337 | 0 |
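# Every third Fibonacci number is even, so the loop above can skip odd terms entirely
# via the recurrence E(k) = 4 * E(k-1) + E(k-2) over 2, 8, 34, 144, ... A sketch of
# that variant:
def even_fib_sum_sketch(limit=4_000_000):
    total, prev, curr = 0, 2, 8
    if limit >= 2:
        total += 2
    while curr <= limit:
        total += curr
        prev, curr = curr, 4 * curr + prev
    return total

assert even_fib_sum_sketch() == 4_613_732  # Project Euler 2 result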
import qiskit
def UpperCamelCase__( UpperCamelCase__ : int , UpperCamelCase__ : int )->qiskit.result.counts.Counts:
A__ = qiskit.Aer.get_backend('''aer_simulator''' )
# Create a Quantum Circuit acting on the q register
A__ = qiskit.QuantumCircuit(UpperCamelCase__ , UpperCamelCase__ )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
A__ = qiskit.execute(UpperCamelCase__ , UpperCamelCase__ , shots=10_00 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(UpperCamelCase__ )
if __name__ == "__main__":
a__: str = single_qubit_measure(2, 2)
print(F"Total count for various states are: {counts}")
| 39 |
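# The circuit above applies X to both qubits of |00>, so every shot should read '11'.
# A usage sketch - this assumes a pre-1.0 Qiskit where qiskit.Aer and qiskit.execute
# still exist, exactly as the snippet itself does:
counts = single_qubit_measure(2, 2)
print(counts)                     # expected: {'11': 1000}
assert counts.get("11") == 1_000  # X|0> = |1> on both qubits, deterministically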
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class SCREAMING_SNAKE_CASE__ :
def __init__( self,__lowerCamelCase ):
A__ = data
A__ = None
class SCREAMING_SNAKE_CASE__ :
def __init__( self ):
A__ = None
A__ = None
def __iter__( self ):
A__ = self.head
while self.head:
yield node.data
A__ = node.next
if node == self.head:
break
def __len__( self ):
return sum(1 for _ in self )
def __repr__( self ):
return "->".join(str(__lowerCamelCase ) for item in iter(self ) )
def UpperCamelCase ( self,__lowerCamelCase ):
self.insert_nth(len(self ),__lowerCamelCase )
def UpperCamelCase ( self,__lowerCamelCase ):
self.insert_nth(0,__lowerCamelCase )
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase ):
if index < 0 or index > len(self ):
raise IndexError('''list index out of range.''' )
A__ = Node(__lowerCamelCase )
if self.head is None:
A__ = new_node # first node points itself
A__ = A__ = new_node
elif index == 0: # insert at head
A__ = self.head
A__ = A__ = new_node
else:
A__ = self.head
for _ in range(index - 1 ):
A__ = temp.next
A__ = temp.next
A__ = new_node
if index == len(self ) - 1: # insert at tail
A__ = new_node
def UpperCamelCase ( self ):
return self.delete_nth(0 )
def UpperCamelCase ( self ):
return self.delete_nth(len(self ) - 1 )
def UpperCamelCase ( self,__lowerCamelCase = 0 ):
if not 0 <= index < len(self ):
raise IndexError('''list index out of range.''' )
A__ = self.head
if self.head == self.tail: # just one node
A__ = A__ = None
elif index == 0: # delete head node
A__ = self.tail.next.next
A__ = self.head.next
else:
A__ = self.head
for _ in range(index - 1 ):
A__ = temp.next
A__ = temp.next
A__ = temp.next.next
if index == len(self ) - 1: # delete at tail
A__ = temp
return delete_node.data
def UpperCamelCase ( self ):
return len(self ) == 0
def UpperCamelCase__( )->None:
A__ = CircularLinkedList()
assert len(UpperCamelCase__ ) == 0
assert circular_linked_list.is_empty() is True
assert str(UpperCamelCase__ ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(UpperCamelCase__ ) == i
circular_linked_list.insert_nth(UpperCamelCase__ , i + 1 )
assert str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39 | 1 |
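# Stripped of the mangled names, the core invariant above is that tail.next always
# wraps back to head. A minimal sketch of the same structure (append and iteration
# only; class and method names are illustrative):
class _Node:
    def __init__(self, data):
        self.data, self.next = data, None

class CircularListSketch:
    def __init__(self):
        self.head = self.tail = None

    def append(self, data):
        node = _Node(data)
        if self.head is None:
            self.head = self.tail = node
            node.next = node          # a single node points at itself
        else:
            node.next = self.head     # the new tail wraps around to head
            self.tail.next = node
            self.tail = node

    def __iter__(self):
        node = self.head
        while node is not None:
            yield node.data
            node = node.next
            if node is self.head:
                break

clist = CircularListSketch()
for x in (1, 2, 3):
    clist.append(x)
assert "->".join(str(item) for item in clist) == "1->2->3"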
from collections.abc import Sequence
def lowerCamelCase__ ( a , a ) -> List[Any]:
return sum(c * (x**i) for i, c in enumerate(lowerCAmelCase__ ) )
def lowerCamelCase__ ( a , a ) -> Tuple:
_A: Union[str, Any] = 0.0
for coeff in reversed(lowerCAmelCase__ ):
_A: Union[str, Any] = result * x + coeff
return result
if __name__ == "__main__":
UpperCAmelCase__ : Dict = (0.0, 0.0, 5.0, 9.3, 7.0)
UpperCAmelCase__ : Tuple = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 121 |
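# Both functions above evaluate the same polynomial, but Horner's rule needs only n
# multiplications and n additions versus the n exponentiations of the direct sum.
# A quick equivalence check, reusing the same sample polynomial as the snippet:
coeffs, x0 = (0.0, 0.0, 5.0, 9.3, 7.0), 10.0
direct = sum(c * x0**i for i, c in enumerate(coeffs))
horner_val = 0.0
for c in reversed(coeffs):
    horner_val = horner_val * x0 + c
assert abs(direct - horner_val) < 1e-9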
'''simple docstring'''
def lowercase_ ( lowerCAmelCase__ : str ):
"""simple docstring"""
return credit_card_number.startswith(("""34""", """35""", """37""", """4""", """5""", """6""") )
def lowercase_ ( lowerCAmelCase__ : str ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = credit_card_number
__UpperCAmelCase : List[str] = 0
__UpperCAmelCase : Dict = len(lowerCAmelCase__ ) - 2
for i in range(lowerCAmelCase__ , -1 , -2 ):
# double the value of every second digit
__UpperCAmelCase : Optional[int] = int(cc_number[i] )
digit *= 2
        # If doubling a digit results in a two-digit number,
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6)
        # to get a single-digit number.
if digit > 9:
digit %= 10
digit += 1
__UpperCAmelCase : Optional[int] = cc_number[:i] + str(lowerCAmelCase__ ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(lowerCAmelCase__ ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def lowercase_ ( lowerCAmelCase__ : str ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = f'{credit_card_number} is an invalid credit card number because'
if not credit_card_number.isdigit():
print(f'{error_message} it has nonnumerical characters.' )
return False
if not 13 <= len(lowerCAmelCase__ ) <= 16:
print(f'{error_message} of its length.' )
return False
if not validate_initial_digits(lowerCAmelCase__ ):
print(f'{error_message} of its first two digits.' )
return False
if not luhn_validation(lowerCAmelCase__ ):
print(f'{error_message} it fails the Luhn check.' )
return False
print(f'{credit_card_number} is a valid credit card number.' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('''4111111111111111''')
validate_credit_card_number('''32323''')
| 254 | 0 |
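# The doubling step above uses digit %= 10; digit += 1, which for a doubled digit in
# 10..18 is the same as subtracting 9 - the standard Luhn shortcut. A compact sketch
# of the same checksum:
def luhn_ok(number):
    total = 0
    for i, ch in enumerate(reversed(number)):
        d = int(ch)
        if i % 2 == 1:          # every second digit from the right
            d *= 2
            if d > 9:
                d -= 9
        total += d
    return total % 10 == 0

assert luhn_ok("4111111111111111")        # the classic valid test number
assert not luhn_ok("4111111111111112")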
from __future__ import annotations
def _UpperCAmelCase (UpperCamelCase_ : list[float] , UpperCamelCase_ : list[float] ):
'''simple docstring'''
_lowerCAmelCase : int = sorted(numsa + numsa )
_lowerCAmelCase , _lowerCAmelCase : int = divmod(len(UpperCamelCase_ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCamelCase : Tuple = [float(x) for x in input("Enter the elements of first array: ").split()]
_lowerCamelCase : Optional[Any] = [float(x) for x in input("Enter the elements of second array: ").split()]
print(F'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
| 159 |
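# Sorting the concatenation above costs O((m+n) log(m+n)). If both inputs are already
# individually sorted - an assumption the function above does not make - a lazy merge
# reaches the middle in O(m+n). A sketch under that assumption:
import heapq

def median_of_sorted_sketch(a, b):
    merged = list(heapq.merge(a, b))   # merges two sorted inputs in linear time
    n = len(merged)
    mid = n // 2
    return merged[mid] if n % 2 else (merged[mid - 1] + merged[mid]) / 2

assert median_of_sorted_sketch([1, 3], [2, 4]) == 2.5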
import os
from datetime import datetime as dt
from github import Github
_lowerCamelCase : List[Any] = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def _UpperCAmelCase ():
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = Github(os.environ["""GITHUB_TOKEN"""] )
_lowerCAmelCase : Any = g.get_repo("""huggingface/diffusers""" )
_lowerCAmelCase : Tuple = repo.get_issues(state="""open""" )
for issue in open_issues:
_lowerCAmelCase : Tuple = sorted(issue.get_comments() , key=lambda UpperCamelCase_ : i.created_at , reverse=UpperCamelCase_ )
_lowerCAmelCase : List[Any] = comments[0] if len(UpperCamelCase_ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="""closed""" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="""open""" )
issue.remove_from_labels("""stale""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
issue.add_to_labels("""stale""" )
if __name__ == "__main__":
main()
| 159 | 1 |
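# The two time windows above (7 quiet days after the bot's warning, 23 quiet days
# before it, both gated on a 30-day-old issue) are the whole state machine. A
# pure-datetime sketch of the two predicates (function names are illustrative):
from datetime import datetime

def should_close_sketch(updated_at, created_at, now=None):
    now = now or datetime.utcnow()
    # Close: the stale warning was the last activity and 7 more days have passed.
    return (now - updated_at).days > 7 and (now - created_at).days >= 30

def should_warn_sketch(updated_at, created_at, now=None):
    now = now or datetime.utcnow()
    # Warn: 23 quiet days on an issue that is at least 30 days old.
    return (now - updated_at).days > 23 and (now - created_at).days >= 30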
'''simple docstring'''
def a__ ( lowercase : Union[str, Any], lowercase : Union[str, Any] ) -> int:
"""simple docstring"""
return number | (1 << position)
def a__ ( lowercase : Dict, lowercase : int ) -> int:
"""simple docstring"""
return number & ~(1 << position)
def a__ ( lowercase : Dict, lowercase : List[str] ) -> Any:
"""simple docstring"""
return number ^ (1 << position)
def a__ ( lowercase : List[Any], lowercase : str ) -> Tuple:
"""simple docstring"""
return ((number >> position) & 1) == 1
def a__ ( lowercase : List[Any], lowercase : str ) -> Any:
"""simple docstring"""
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 324 |
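# Each helper above is a single mask operation on 1 << position. A quick demonstration
# on the byte 0b0000_1010 (the value is arbitrary):
n = 0b0000_1010
assert n | (1 << 0) == 0b0000_1011    # set bit 0
assert n & ~(1 << 1) == 0b0000_1000   # clear bit 1
assert n ^ (1 << 3) == 0b0000_0010    # flip bit 3
assert ((n >> 1) & 1) == 1            # bit 1 is set
assert int((n & (1 << 2)) != 0) == 0  # bit 2 is clear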
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {"vocab_file": "spiece.model"}
_UpperCamelCase = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
_UpperCamelCase = {
"AI-Sweden/gpt-sw3-126m": 2048,
"AI-Sweden/gpt-sw3-350m": 2048,
"AI-Sweden/gpt-sw3-1.6b": 2048,
"AI-Sweden/gpt-sw3-6.7b": 2048,
"AI-Sweden/gpt-sw3-20b": 2048,
}
class __lowercase (_UpperCAmelCase ):
_UpperCamelCase = VOCAB_FILES_NAMES
_UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase = ["""input_ids""", """attention_mask"""]
def __init__( self , A_ , A_=False , A_=False , A_=False , A_=None , A_=None , A_=None , A_=None , A_ = None , **A_ , ) ->None:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
__lowerCAmelCase : int = kwargs.get('''name_or_path''' )
if name_or_path is None:
logger.warning(
                '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b;'''
                ''' if you are testing the model, this can safely be ignored''' )
__lowerCAmelCase : Union[str, Any] = '''None'''
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
__lowerCAmelCase : str = '''<|endoftext|>''' if eos_token is None else eos_token
__lowerCAmelCase : Any = '''<unk>''' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
__lowerCAmelCase : Dict = unk_token if pad_token is None else pad_token
__lowerCAmelCase : int = eos_token if bos_token is None else bos_token
else:
__lowerCAmelCase : Optional[int] = '''<pad>''' if pad_token is None else pad_token
__lowerCAmelCase : List[str] = '''<s>''' if bos_token is None else bos_token
super().__init__(
do_lower_case=A_ , remove_space=A_ , keep_accents=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , pad_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
__lowerCAmelCase : Union[str, Any] = do_lower_case
__lowerCAmelCase : Union[str, Any] = remove_space
__lowerCAmelCase : int = keep_accents
__lowerCAmelCase : Union[str, Any] = vocab_file
__lowerCAmelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A_ )
# Used for whitespace normalization in input texts
        # fmt: off
__lowerCAmelCase : List[Any] = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
__lowerCAmelCase : int = re.compile(
f"""[{"".join(map(A_ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]""" )
def __getstate__( self ) ->Dict:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = self.__dict__.copy()
__lowerCAmelCase : List[Any] = None
return state
def __setstate__( self , A_ ) ->Tuple:
'''simple docstring'''
__lowerCAmelCase : int = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__lowerCAmelCase : List[Any] = {}
__lowerCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
return len(self.sp_model )
def UpperCamelCase__ ( self , A_ ) ->str:
'''simple docstring'''
__lowerCAmelCase : int = self.non_printing_characters_re.sub('''''' , A_ )
# Normalize whitespaces
__lowerCAmelCase : List[str] = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] )
# NFC Unicode normalization
__lowerCAmelCase : Tuple = unicodedata.normalize('''NFC''' , A_ )
return text
def UpperCamelCase__ ( self , A_ , **A_ ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : int = self.preprocess_text(A_ )
return self.sp_model.encode(A_ , out_type=A_ )
def UpperCamelCase__ ( self , A_ ) ->int:
'''simple docstring'''
return self.sp_model.PieceToId(A_ )
def UpperCamelCase__ ( self , A_ ) ->str:
'''simple docstring'''
return self.sp_model.IdToPiece(A_ )
@staticmethod
def UpperCamelCase__ ( A_ ) ->str:
'''simple docstring'''
return out_string
def UpperCamelCase__ ( self , A_ ) ->str:
'''simple docstring'''
__lowerCAmelCase : str = []
__lowerCAmelCase : Tuple = ''''''
__lowerCAmelCase : int = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A_ ) + token
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : Optional[int] = []
else:
current_sub_tokens.append(A_ )
__lowerCAmelCase : str = False
out_string += self.sp_model.decode(A_ )
return out_string
def UpperCamelCase__ ( self ) ->Dict[str, int]:
'''simple docstring'''
__lowerCAmelCase : str = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase__ ( self , A_ , A_ = None ) ->Tuple[str]:
'''simple docstring'''
if not os.path.isdir(A_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCAmelCase : Any = os.path.join(
A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ , '''wb''' ) as fi:
__lowerCAmelCase : Dict = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,)
def UpperCamelCase__ ( self , A_ , A_ = False ) ->Union[List[int], List[List[int]], "torch.Tensor"]:
'''simple docstring'''
if isinstance(A_ , A_ ):
__lowerCAmelCase : Optional[Any] = self.preprocess_text(A_ )
__lowerCAmelCase : Dict = self.sp_model.encode(A_ )
else:
__lowerCAmelCase : Dict = [self.preprocess_text(A_ ) for t in text]
__lowerCAmelCase : Optional[int] = self.sp_model.encode(A_ )
if return_tensors is True or return_tensors == "pt":
__lowerCAmelCase : Tuple = torch.tensor(A_ )
return token_ids
def UpperCamelCase__ ( self , A_ ) ->str:
'''simple docstring'''
return self.sp_model.decode(A_ )
def UpperCamelCase__ ( self , A_ ) ->List[int]:
'''simple docstring'''
__lowerCAmelCase : int = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
__lowerCAmelCase : Any = (
f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(A_ ) + f"""{self.bos_token}Bot:"""
)
return self.encode(text=A_ )
| 275 | 0 |
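# The conversation method above renders each turn as "User: ..." or "Bot: ...", joins
# the turns with the BOS token, and frames the whole prompt with EOS + BOS before
# encoding. A standalone sketch of that template (the token strings are the defaults
# set in __init__ above):
def build_chat_prompt_sketch(turns, bos="<s>", eos="<|endoftext|>"):
    texts = [f"User: {t}" if is_user else f"Bot: {t}" for is_user, t in turns]
    return f"{eos}{bos}" + f"{bos}".join(texts) + f"{bos}Bot:"

print(build_chat_prompt_sketch([(True, "Hello!"), (False, "Hi there!"), (True, "Who are you?")]))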
"""simple docstring"""
import sys
lowerCamelCase = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = 1
for digit in s:
product *= int(lowerCAmelCase__ )
return product
def a__ ( lowerCAmelCase__ = N ):
UpperCAmelCase_ = -sys.maxsize - 1
UpperCAmelCase_ = n[:13]
UpperCAmelCase_ = 13
while cur_index < len(lowerCAmelCase__ ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
UpperCAmelCase_ = substr[1:] + n[cur_index]
cur_index += 1
else:
UpperCAmelCase_ = max(lowerCAmelCase__ , str_eval(lowerCAmelCase__ ) )
UpperCAmelCase_ = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(F"{solution() = }")
| 368 |
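# The window logic above advances one digit at a time but jumps a full 13 digits after
# each evaluated window. A simpler exhaustive scan checks every 13-digit window
# directly (the 1000-digit constant is the string defined above under a mangled name):
from math import prod

def largest_window_product_sketch(s, width=13):
    return max(prod(int(d) for d in s[i : i + width]) for i in range(len(s) - width + 1))

# largest_window_product_sketch(<the 1000-digit string>) == 23_514_624_000  (Project Euler 8)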
"""simple docstring"""
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
lowerCamelCase = Path(__file__).resolve().parents[3] / """src"""
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
lowerCamelCase = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""}
lowerCamelCase = """zero2"""
lowerCamelCase = """zero3"""
lowerCamelCase = [ZEROa, ZEROa]
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
    UpperCAmelCase_ = parameterized.to_safe_name("_".join(str(x ) for x in param.args ) )
return f"""{func.__name__}_{param_based_name}"""
# Cartesian-product of zero stages with models to test
lowerCamelCase = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@parameterized.expand(_UpperCAmelCase , name_func=_UpperCAmelCase )
def lowercase__ ( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : Any ) -> Optional[int]:
'''simple docstring'''
self.run_and_check(
stage=_UpperCAmelCase , model=_UpperCAmelCase , distributed=_UpperCAmelCase , fpaa=_UpperCAmelCase , )
@require_torch_multi_gpu
@parameterized.expand(_UpperCAmelCase , name_func=_UpperCAmelCase )
def lowercase__ ( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str ) -> Any:
'''simple docstring'''
self.run_and_check(
stage=_UpperCAmelCase , model=_UpperCAmelCase , distributed=_UpperCAmelCase , fpaa=_UpperCAmelCase , )
@parameterized.expand(_UpperCAmelCase , name_func=_UpperCAmelCase )
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : List[str] ) -> Union[str, Any]:
'''simple docstring'''
self.run_and_check(
stage=_UpperCAmelCase , model=_UpperCAmelCase , distributed=_UpperCAmelCase , fpaa=_UpperCAmelCase , )
@require_torch_multi_gpu
@parameterized.expand(_UpperCAmelCase , name_func=_UpperCAmelCase )
def lowercase__ ( self : int , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] ) -> Optional[int]:
'''simple docstring'''
self.run_and_check(
stage=_UpperCAmelCase , model=_UpperCAmelCase , distributed=_UpperCAmelCase , fpaa=_UpperCAmelCase , )
def lowercase__ ( self : Optional[Any] , _UpperCAmelCase : Tuple ) -> Optional[int]:
'''simple docstring'''
pass
def lowercase__ ( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : int = 10 , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = True , ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = models[model]
UpperCAmelCase_ = self.run_trainer(
stage=_UpperCAmelCase , model_name=_UpperCAmelCase , eval_steps=_UpperCAmelCase , num_train_epochs=1 , distributed=_UpperCAmelCase , fpaa=_UpperCAmelCase , )
self.do_checks(_UpperCAmelCase )
return output_dir
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : int = 10 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = True , ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.get_auto_remove_tmp_dir("./xxx" , after=_UpperCAmelCase )
UpperCAmelCase_ = F"""
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(_UpperCAmelCase )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
""".split()
if fpaa:
args.extend(["--fp16"] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
UpperCAmelCase_ = F"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
UpperCAmelCase_ = [F"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
UpperCAmelCase_ = self.get_launcher(_UpperCAmelCase )
UpperCAmelCase_ = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(_UpperCAmelCase , env=self.get_env() )
return output_dir
def lowercase__ ( self : List[Any] , _UpperCAmelCase : int=False ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = min(2 , get_gpu_count() ) if distributed else 1
return F"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
| 241 | 0 |
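# The custom name_func above exists because parameterized.expand names sub-tests by
# the first argument only; joining all args keeps e.g. stage and model visible. A
# minimal sketch of the same pattern outside this suite:
import unittest
from parameterized import parameterized

def custom_name(testcase_func, param_num, param):
    return f"{testcase_func.__name__}_{parameterized.to_safe_name('_'.join(str(x) for x in param.args))}"

class ExampleTest(unittest.TestCase):
    @parameterized.expand([("zero2", "base"), ("zero3", "robust")], name_func=custom_name)
    def test_combo(self, stage, model):
        # Sub-tests are named test_combo_zero2_base and test_combo_zero3_robust.
        self.assertIn(stage, ("zero2", "zero3"))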
"""simple docstring"""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=3 , lowerCAmelCase__=1_8 , lowerCAmelCase__=3_0 , lowerCAmelCase__=4_0_0 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=[0.5, 0.5, 0.5] , lowerCAmelCase__=[0.5, 0.5, 0.5] , lowerCAmelCase__=False , ):
__SCREAMING_SNAKE_CASE = size if size is not None else {"""height""": 2_0, """width""": 2_0}
__SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"""height""": 1_8, """width""": 1_8}
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = min_resolution
__SCREAMING_SNAKE_CASE = max_resolution
__SCREAMING_SNAKE_CASE = do_resize
__SCREAMING_SNAKE_CASE = size
__SCREAMING_SNAKE_CASE = do_center_crop
__SCREAMING_SNAKE_CASE = crop_size
__SCREAMING_SNAKE_CASE = do_normalize
__SCREAMING_SNAKE_CASE = image_mean
__SCREAMING_SNAKE_CASE = image_std
__SCREAMING_SNAKE_CASE = do_reduce_labels
def snake_case_ ( self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def _lowerCAmelCase ( ):
__SCREAMING_SNAKE_CASE = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
__SCREAMING_SNAKE_CASE = Image.open(dataset[0]["""file"""] )
__SCREAMING_SNAKE_CASE = Image.open(dataset[1]["""file"""] )
return image, map
def _lowerCAmelCase ( ):
__SCREAMING_SNAKE_CASE = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
__SCREAMING_SNAKE_CASE = Image.open(ds[0]["""file"""] )
__SCREAMING_SNAKE_CASE = Image.open(ds[1]["""file"""] )
__SCREAMING_SNAKE_CASE = Image.open(ds[2]["""file"""] )
__SCREAMING_SNAKE_CASE = Image.open(ds[3]["""file"""] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( __a , unittest.TestCase ):
"""simple docstring"""
__lowercase : Any = BeitImageProcessor if is_vision_available() else None
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = BeitImageProcessingTester(self)
@property
def snake_case_ ( self):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowerCAmelCase__ , """do_resize"""))
self.assertTrue(hasattr(lowerCAmelCase__ , """size"""))
self.assertTrue(hasattr(lowerCAmelCase__ , """do_center_crop"""))
self.assertTrue(hasattr(lowerCAmelCase__ , """center_crop"""))
self.assertTrue(hasattr(lowerCAmelCase__ , """do_normalize"""))
self.assertTrue(hasattr(lowerCAmelCase__ , """image_mean"""))
self.assertTrue(hasattr(lowerCAmelCase__ , """image_std"""))
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"""height""": 2_0, """width""": 2_0})
self.assertEqual(image_processor.crop_size , {"""height""": 1_8, """width""": 1_8})
self.assertEqual(image_processor.do_reduce_labels , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , crop_size=8_4 , reduce_labels=lowerCAmelCase__)
self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2})
self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4})
self.assertEqual(image_processor.do_reduce_labels , lowerCAmelCase__)
def snake_case_ ( self):
pass
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image)
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCAmelCase__ , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray)
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCAmelCase__ , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor)
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCAmelCase__ , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = []
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor)
maps.append(torch.zeros(image.shape[-2:]).long())
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , maps[0] , return_tensors="""pt""")
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long)
self.assertTrue(encoding["""labels"""].min().item() >= 0)
self.assertTrue(encoding["""labels"""].max().item() <= 2_5_5)
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCAmelCase__ , lowerCAmelCase__ , return_tensors="""pt""")
self.assertEqual(
encoding["""pixel_values"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long)
self.assertTrue(encoding["""labels"""].min().item() >= 0)
self.assertTrue(encoding["""labels"""].max().item() <= 2_5_5)
# Test not batched input (PIL images)
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = prepare_semantic_single_inputs()
__SCREAMING_SNAKE_CASE = image_processing(lowerCAmelCase__ , lowerCAmelCase__ , return_tensors="""pt""")
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long)
self.assertTrue(encoding["""labels"""].min().item() >= 0)
self.assertTrue(encoding["""labels"""].max().item() <= 2_5_5)
# Test batched input (PIL images)
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = prepare_semantic_batch_inputs()
__SCREAMING_SNAKE_CASE = image_processing(lowerCAmelCase__ , lowerCAmelCase__ , return_tensors="""pt""")
self.assertEqual(
encoding["""pixel_values"""].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
2,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long)
self.assertTrue(encoding["""labels"""].min().item() >= 0)
self.assertTrue(encoding["""labels"""].max().item() <= 2_5_5)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict)
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = prepare_semantic_single_inputs()
__SCREAMING_SNAKE_CASE = image_processing(lowerCAmelCase__ , lowerCAmelCase__ , return_tensors="""pt""")
self.assertTrue(encoding["""labels"""].min().item() >= 0)
self.assertTrue(encoding["""labels"""].max().item() <= 1_5_0)
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = image_processing(lowerCAmelCase__ , lowerCAmelCase__ , return_tensors="""pt""")
self.assertTrue(encoding["""labels"""].min().item() >= 0)
self.assertTrue(encoding["""labels"""].max().item() <= 2_5_5)
| 100 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
class lowerCAmelCase_ ( lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Tuple = """bert-generation"""
def __init__( self , lowerCAmelCase=5_03_58 , lowerCAmelCase=10_24 , lowerCAmelCase=24 , lowerCAmelCase=16 , lowerCAmelCase=40_96 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_12 , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase=1 , lowerCAmelCase="absolute" , lowerCAmelCase=True , **lowerCAmelCase , ):
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = hidden_act
snake_case = intermediate_size
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = initializer_range
snake_case = layer_norm_eps
snake_case = position_embedding_type
snake_case = use_cache
| 150 | 0 |
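# The config class above follows the standard PretrainedConfig recipe: a model_type
# string plus __init__ kwargs mirrored onto attributes, with token ids forwarded to
# the base class. A minimal sketch of a custom config in the same style (the class
# name and fields are illustrative):
from transformers import PretrainedConfig

class TinyDemoConfig(PretrainedConfig):
    model_type = "tiny-demo"

    def __init__(self, vocab_size=1_000, hidden_size=64, pad_token_id=0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size

cfg = TinyDemoConfig(hidden_size=128)
cfg.save_pretrained("./tiny-demo")                     # writes config.json
assert TinyDemoConfig.from_pretrained("./tiny-demo").hidden_size == 128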
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def A ( _lowercase , _lowercase=False ):
try:
SCREAMING_SNAKE_CASE : Tuple = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
SCREAMING_SNAKE_CASE : List[Any] = default
else:
# KEY is set, convert it to True or False.
try:
SCREAMING_SNAKE_CASE : Optional[int] = strtobool(_lowercase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"""If set, {key} must be yes or no.""" )
return _value
__UpperCamelCase : Dict = parse_flag_from_env('RUN_SLOW', default=False)
def A ( _lowercase ):
return unittest.skip('''Test was skipped''' )(_lowercase )
def A ( _lowercase ):
return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(_lowercase )
def A ( _lowercase ):
return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(_lowercase )
def A ( _lowercase ):
return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(_lowercase )
def A ( _lowercase ):
return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(_lowercase )
def A ( _lowercase ):
return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(_lowercase )
def A ( _lowercase ):
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(_lowercase )
def A ( _lowercase ):
return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(_lowercase )
def A ( _lowercase ):
return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(_lowercase )
def A ( _lowercase ):
return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(_lowercase )
def A ( _lowercase ):
return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(_lowercase )
def A ( _lowercase ):
return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(_lowercase )
def A ( _lowercase ):
return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(_lowercase )
def A ( _lowercase ):
return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(_lowercase )
def A ( _lowercase ):
return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(_lowercase )
def A ( _lowercase ):
return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(_lowercase )
def A ( _lowercase=None , _lowercase=None ):
if test_case is None:
return partial(_lowercase , version=_lowercase )
return unittest.skipUnless(is_torch_version('''>=''' , _lowercase ) , f"""test requires torch version >= {version}""" )(_lowercase )
def A ( _lowercase ):
return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(_lowercase )
def A ( _lowercase ):
return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(_lowercase )
def A ( _lowercase ):
return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(_lowercase )
__UpperCamelCase : Optional[Any] = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def A ( _lowercase ):
return unittest.skipUnless(
_atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(_lowercase )
class lowercase__ ( unittest.TestCase):
UpperCamelCase_ = True
@classmethod
def __A ( cls : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = tempfile.mkdtemp()
@classmethod
def __A ( cls : Tuple ):
'''simple docstring'''
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __A ( self : Tuple ):
'''simple docstring'''
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('''**/*''' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(UpperCamelCase__ )
class lowercase__ ( unittest.TestCase):
def __A ( self : Tuple ):
'''simple docstring'''
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class lowercase__ ( unittest.TestCase):
def __A ( self : Optional[Any] , UpperCamelCase__ : Union[mock.Mock, List[mock.Mock]] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = mocks if isinstance(UpperCamelCase__ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = AcceleratorState()
SCREAMING_SNAKE_CASE : int = tensor[None].clone().to(state.device )
SCREAMING_SNAKE_CASE : List[Any] = gather(_lowercase ).cpu()
SCREAMING_SNAKE_CASE : List[str] = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , _lowercase ):
return False
return True
class lowercase__ :
def __init__( self : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = returncode
SCREAMING_SNAKE_CASE : List[str] = stdout
SCREAMING_SNAKE_CASE : str = stderr
async def A ( _lowercase , _lowercase ):
while True:
SCREAMING_SNAKE_CASE : List[str] = await stream.readline()
if line:
callback(_lowercase )
else:
break
async def A ( _lowercase , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=False , _lowercase=False ):
if echo:
print('''\nRunning: ''' , ''' '''.join(_lowercase ) )
SCREAMING_SNAKE_CASE : Any = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_lowercase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowercase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Union[str, Any] = []
def tee(_lowercase , _lowercase , _lowercase , _lowercase="" ):
SCREAMING_SNAKE_CASE : Any = line.decode('''utf-8''' ).rstrip()
sink.append(_lowercase )
if not quiet:
print(_lowercase , _lowercase , file=_lowercase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda _lowercase : tee(_lowercase , _lowercase , sys.stdout , label='''stdout:''' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda _lowercase : tee(_lowercase , _lowercase , sys.stderr , label='''stderr:''' ) ) ),
] , timeout=_lowercase , )
return _RunOutput(await p.wait() , _lowercase , _lowercase )
def A ( _lowercase , _lowercase=None , _lowercase=None , _lowercase=180 , _lowercase=False , _lowercase=True ):
SCREAMING_SNAKE_CASE : Optional[Any] = asyncio.get_event_loop()
SCREAMING_SNAKE_CASE : List[Any] = loop.run_until_complete(
_stream_subprocess(_lowercase , env=_lowercase , stdin=_lowercase , timeout=_lowercase , quiet=_lowercase , echo=_lowercase ) )
SCREAMING_SNAKE_CASE : int = ''' '''.join(_lowercase )
if result.returncode > 0:
SCREAMING_SNAKE_CASE : Optional[int] = '''\n'''.join(result.stderr )
raise RuntimeError(
f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
f"""The combined stderr from workers follows:\n{stderr}""" )
return result
class lowercase__ ( UpperCamelCase_):
pass
def A ( _lowercase , _lowercase=False ):
try:
SCREAMING_SNAKE_CASE : str = subprocess.check_output(_lowercase , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(_lowercase , '''decode''' ):
SCREAMING_SNAKE_CASE : Union[str, Any] = output.decode('''utf-8''' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"""Command `{' '.join(_lowercase )}` failed with the following error:\n\n{e.output.decode()}""" ) from e
| 258 | import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def A ( _lowercase , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : str = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
SCREAMING_SNAKE_CASE : Tuple = (
('''layer.''', '''layer_'''),
('''word_embeddings.weight''', '''word_embeddings'''),
('''position_embeddings.weight''', '''position_embeddings'''),
('''token_type_embeddings.weight''', '''token_type_embeddings'''),
('''.''', '''/'''),
('''LayerNorm/weight''', '''LayerNorm/gamma'''),
('''LayerNorm/bias''', '''LayerNorm/beta'''),
('''weight''', '''kernel'''),
)
if not os.path.isdir(_lowercase ):
os.makedirs(_lowercase )
SCREAMING_SNAKE_CASE : List[str] = model.state_dict()
def to_tf_var_name(_lowercase ):
for patt, repl in iter(_lowercase ):
SCREAMING_SNAKE_CASE : Dict = name.replace(_lowercase , _lowercase )
return f"""bert/{name}"""
def create_tf_var(_lowercase , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.dtypes.as_dtype(tensor.dtype )
SCREAMING_SNAKE_CASE : Tuple = tf.get_variable(dtype=_lowercase , shape=tensor.shape , name=_lowercase , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(_lowercase )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
SCREAMING_SNAKE_CASE : List[str] = to_tf_var_name(_lowercase )
SCREAMING_SNAKE_CASE : List[str] = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
SCREAMING_SNAKE_CASE : Any = torch_tensor.T
SCREAMING_SNAKE_CASE : str = create_tf_var(tensor=_lowercase , name=_lowercase , session=_lowercase )
tf.keras.backend.set_value(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE : Dict = session.run(_lowercase )
print(f"""Successfully created {tf_name}: {np.allclose(_lowercase , _lowercase )}""" )
SCREAMING_SNAKE_CASE : List[Any] = tf.train.Saver(tf.trainable_variables() )
saver.save(_lowercase , os.path.join(_lowercase , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) )
def A ( _lowercase=None ):
SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=_lowercase , required=_lowercase , help='''model name e.g. bert-base-uncased''' )
parser.add_argument(
'''--cache_dir''' , type=_lowercase , default=_lowercase , required=_lowercase , help='''Directory containing pytorch model''' )
parser.add_argument('''--pytorch_model_path''' , type=_lowercase , required=_lowercase , help='''/path/to/<pytorch-model-name>.bin''' )
parser.add_argument('''--tf_cache_dir''' , type=_lowercase , required=_lowercase , help='''Directory in which to save tensorflow model''' )
SCREAMING_SNAKE_CASE : Dict = parser.parse_args(_lowercase )
SCREAMING_SNAKE_CASE : Any = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=_lowercase , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 258 | 1 |
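# The heart of the converter above is a pure string rewrite from PyTorch parameter
# names to TF checkpoint names (plus transposing weights that match the dense and
# attention patterns). The renaming table in isolation, applied in the same order:
def to_tf_var_name_sketch(name):
    for patt, repl in (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    ):
        name = name.replace(patt, repl)
    return f"bert/{name}"

assert (
    to_tf_var_name_sketch("encoder.layer.0.attention.self.query.weight")
    == "bert/encoder/layer_0/attention/self/query/kernel"
)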
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
__lowerCAmelCase = logging.getLogger(__name__)
def UpperCAmelCase_ (__a : List[str] , __a : int ):
"""simple docstring"""
if os.path.exists(__a ):
if os.path.exists(os.path.join(__a , 'config.json' ) ) and os.path.isfile(
os.path.join(__a , 'config.json' ) ):
os.remove(os.path.join(__a , 'config.json' ) )
if os.path.exists(os.path.join(__a , 'pytorch_model.bin' ) ) and os.path.isfile(
os.path.join(__a , 'pytorch_model.bin' ) ):
os.remove(os.path.join(__a , 'pytorch_model.bin' ) )
else:
os.makedirs(__a )
model.save_pretrained(__a )
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along its last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
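# Hedged mini-example (illustrative, not in the original script): a uniform
# distribution over four outcomes has entropy ln(4) ~= 1.386.
#
#   probs = torch.full((4,), 0.25)
#   entropy(probs)  # tensor(1.3863)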
def print_ad_tensor(tensor):
    """Log a 2D tensor (layers x heads) as a tab-separated table."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute per-head attention entropy and gradient-based head importance scores."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_ad_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Iteratively mask the least important heads until the score drops below the threshold."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune the heads selected by `mask_heads` and compare score and speed before/after."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
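# Hedged usage note (illustrative, not in the original script): `model.prune_heads`
# expects a {layer_index: [head_indices]} mapping, which is what `prune_heads` above
# derives from the learned head mask. For example, removing heads 0 and 2 of layer 1
# and head 3 of layer 5:
#
#   model.prune_heads({1: [0, 2], 5: [3]})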
| 271 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 271 | 1 |
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None):
    """Download a translation dataset and write plain-text `.source`/`.target` files per split."""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split

        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
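# Hedged usage sketch (illustrative; the save directory name is a placeholder).
# fire exposes the function as a CLI, so the call below mirrors:
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16
#
#   download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir="wmt16-ro-en")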
| 352 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """Holds the CLIP embedding mean/std used to scale image embeddings to unit normal and back."""

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
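# Hedged round-trip sketch (illustrative): `scale` maps embeddings into the
# normalized space and `unscale` inverts it, so the composition is the identity.
#
#   normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
#   embeds = torch.randn(2, 768)
#   assert torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds, atol=1e-6)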
| 42 | 0 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the maximizing/minimizing player on a perfect binary game tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
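# Hedged worked example (illustrative): for leaves [3, 5, 2, 9] (height log2(4) = 2),
# the minimizer reduces (3, 5) to 3 and (2, 9) to 2, and the maximizer then picks 3.
#
#   assert minimax(0, 0, True, [3, 5, 2, 9], 2) == 3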
| 121 | """simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 150 | 0 |
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
_DESCRIPTION = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
_KWARGS_DESCRIPTION = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """Compute per-question macro-F1, answer-level F1, and exact match for MultiRC."""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"prediction_text": datasets.Value("string" ),
},
"references": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"answers": datasets.Sequence(datasets.Value("string" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("int64" ),
"paragraph": datasets.Value("int64" ),
"question": datasets.Value("int64" ),
},
"prediction": datasets.Value("int64" ),
},
"references": datasets.Value("int64" ),
}
else:
return {
"predictions": datasets.Value("int64" ),
"references": datasets.Value("int64" ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 365 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 244 | 0 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()


# NOTE: `text_file`, `zip_csv_with_dir_path` and `zip_image_path` below are assumed to be
# data-file fixtures defined elsewhere in the test suite's conftest.


@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_img_data_
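# Hedged usage sketch (illustrative, not part of the fixture module): a test that
# requests `hf_private_dataset_repo_txt_data` receives the id of a freshly created
# private repo, with the CI endpoint already patched in. The test body is a placeholder.
#
#   def test_load_private_text(hf_private_dataset_repo_txt_data):
#       import datasets
#       ds = datasets.load_dataset(hf_private_dataset_repo_txt_data, use_auth_token=CI_HUB_USER_TOKEN)
#       assert "train" in ds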
| 30 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1_000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
    args = parse_args()
main(args)
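# Hedged standalone sketch of the concatenate-then-chunk logic in `group_texts`
# above (illustrative data; max_length fixed to 4 here): token lists are merged,
# the remainder is dropped, and the rest is split into fixed-size blocks.
#
#   examples = {"input_ids": [[1, 2, 3], [4, 5, 6, 7, 8]]}
#   concatenated = sum(examples["input_ids"], [])            # [1, 2, 3, 4, 5, 6, 7, 8]
#   total = (len(concatenated) // 4) * 4                     # 8
#   chunks = [concatenated[i : i + 4] for i in range(0, total, 4)]
#   assert chunks == [[1, 2, 3, 4], [5, 6, 7, 8]]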
| 279 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 155 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
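# Hedged layout sketch (illustrative token ids): for a sequence pair, the methods
# above produce `[CLS] A [SEP] B [SEP]`, a special-tokens mask marking those three
# positions, and token type ids of 0 for the first segment and 1 for the second:
#
#   tokens:   [CLS]  a1  a2  [SEP]  b1  [SEP]
#   mask:       1     0   0    1     0    1
#   type ids:   0     0   0    0     1    1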
| 155 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 240 |
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k: Harris free parameter, usually in the range [0.04, 0.06].
        window_size: size of the neighbourhood considered for corner detection.
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self):
        return str(self.k)

    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()

                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Response threshold; can be tuned.
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
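# Hedged note on the response formula (illustrative numbers): for a window whose
# structure-tensor sums are wxx = wyy = 10 and wxy = 0, the response is
# det - k * trace**2 = 100 - 0.04 * 20**2 = 84, strongly corner-like, while a flat
# window (wxx = wyy = wxy = 0) scores 0.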
| 273 | 0 |
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
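# Hedged worked examples (illustrative amounts; rates are per period, so 0.01 = 1%):
#
#   simple_interest(1000, 0.01, 30)    # 300.0: linear in the number of periods
#   compound_interest(1000, 0.01, 30)  # ~347.85: (1.01**30 - 1) * 1000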
| 360 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
| 112 | 0 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
@slow
@require_torch
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' )
_UpperCAmelCase = BertTokenizer.from_pretrained('bert-base-uncased' )
_UpperCAmelCase = bertabert.config.encoder.vocab_size
_UpperCAmelCase = tokenizer.sep_token_id
_UpperCAmelCase = tokenizer.cls_token_id
_UpperCAmelCase = 128
_UpperCAmelCase = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' )
_UpperCAmelCase = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' )
_UpperCAmelCase = train_dataset.select(range(32 ) )
_UpperCAmelCase = val_dataset.select(range(16 ) )
_UpperCAmelCase = 4
def _map_to_encoder_decoder_inputs(UpperCAmelCase ):
# Tokenizer will automatically set [BOS] <text> [EOS]
_UpperCAmelCase = tokenizer(batch['article'] , padding='max_length' , truncation=UpperCAmelCase , max_length=512 )
_UpperCAmelCase = tokenizer(batch['highlights'] , padding='max_length' , truncation=UpperCAmelCase , max_length=128 )
_UpperCAmelCase = inputs.input_ids
_UpperCAmelCase = inputs.attention_mask
_UpperCAmelCase = outputs.input_ids
_UpperCAmelCase = outputs.input_ids.copy()
_UpperCAmelCase = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
]
_UpperCAmelCase = outputs.attention_mask
assert all(len(UpperCAmelCase ) == 512 for x in inputs.input_ids )
assert all(len(UpperCAmelCase ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(UpperCAmelCase ):
_UpperCAmelCase = pred.label_ids
_UpperCAmelCase = pred.predictions
# all unnecessary tokens are removed
_UpperCAmelCase = tokenizer.batch_decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
_UpperCAmelCase = tokenizer.batch_decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
_UpperCAmelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCAmelCase ) )] ) / len(UpperCAmelCase )
return {"accuracy": accuracy}
# map train dataset
_UpperCAmelCase = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCAmelCase , batch_size=UpperCAmelCase , remove_columns=['article', 'highlights'] , )
train_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
# same for validation dataset
_UpperCAmelCase = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCAmelCase , batch_size=UpperCAmelCase , remove_columns=['article', 'highlights'] , )
val_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = SeqaSeqTrainingArguments(
output_dir=UpperCAmelCase , per_device_train_batch_size=UpperCAmelCase , per_device_eval_batch_size=UpperCAmelCase , predict_with_generate=UpperCAmelCase , evaluation_strategy='steps' , do_train=UpperCAmelCase , do_eval=UpperCAmelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
_UpperCAmelCase = SeqaSeqTrainer(
model=UpperCAmelCase , args=UpperCAmelCase , compute_metrics=_compute_metrics , train_dataset=UpperCAmelCase , eval_dataset=UpperCAmelCase , tokenizer=UpperCAmelCase , )
# start training
trainer.train()
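# Hedged standalone sketch (added for illustration, not part of the test):
# pad positions in the labels are set to -100 because PyTorch's
# CrossEntropyLoss (and hence the Trainer) ignores targets equal to -100,
# so padding never contributes to the loss.
def mask_pad_tokens(labels: list[list[int]], pad_token_id: int) -> list[list[int]]:
    """Replace pad token ids with -100 so the loss skips padded positions."""
    return [[-100 if token == pad_token_id else token for token in row] for row in labels]


assert mask_pad_tokens([[5, 7, 0, 0]], pad_token_id=0) == [[5, 7, -100, -100]]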
| 39 |
from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def make_dataset() -> tuple[list[int], int]:
    """Build a random ten-element array and a random target value."""
    arr = [randint(-1_000, 1_000) for _ in range(10)]
    target = randint(-5_000, 5_000)
    return (arr, target)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """
    Brute force: try every ordered triplet, O(n^3).

    >>> triplet_sum1([13, 29, 7, 23, 5], 35)
    (5, 7, 23)
    """
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """
    Sort once, then close in with two pointers for each anchor, O(n^2).

    >>> triplet_sum2([13, 29, 7, 23, 5], 35)
    (5, 7, 23)
    """
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            else:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    """Time both implementations on the shared random dataset."""
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10_000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10_000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
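# Hedged cross-check (added for illustration): both implementations normalise
# their answer with sorted(), so they can be compared on a shared input even
# though they discover the triplet in different orders. When several valid
# triplets exist they are not guaranteed to agree in general; they do here.
arr_demo, target_demo = [1, 2, 3, 4, 5, 6], 10
assert triplet_sum1(list(arr_demo), target_demo) == triplet_sum2(list(arr_demo), target_demo) == (1, 3, 6)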
| 39 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=2 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_12 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_="None" , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ):
"""simple docstring"""
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_input_mask
_snake_case = use_token_type_ids
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = num_labels
_snake_case = num_choices
_snake_case = relative_attention
_snake_case = position_biased_input
_snake_case = pos_att_type
_snake_case = scope
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = None
if self.use_input_mask:
_snake_case = random_attention_mask([self.batch_size, self.seq_length] )
_snake_case = None
if self.use_token_type_ids:
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_snake_case = None
_snake_case = None
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=_SCREAMING_SNAKE_CASE , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = TFDebertaVaModel(config=_SCREAMING_SNAKE_CASE )
_snake_case = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_snake_case = [input_ids, input_mask]
_snake_case = model(_SCREAMING_SNAKE_CASE )
_snake_case = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = TFDebertaVaForMaskedLM(config=_SCREAMING_SNAKE_CASE )
_snake_case = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_snake_case = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = self.num_labels
_snake_case = TFDebertaVaForSequenceClassification(config=_SCREAMING_SNAKE_CASE )
_snake_case = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_snake_case = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = self.num_labels
_snake_case = TFDebertaVaForTokenClassification(config=_SCREAMING_SNAKE_CASE )
_snake_case = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_snake_case = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = TFDebertaVaForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
_snake_case = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_snake_case = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFDebertaVaModel,
"fill-mask": TFDebertaVaForMaskedLM,
"question-answering": TFDebertaVaForQuestionAnswering,
"text-classification": TFDebertaVaForSequenceClassification,
"token-classification": TFDebertaVaForTokenClassification,
"zero-shot": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = TFDebertaVaModelTester(self )
_snake_case = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def lowerCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_SCREAMING_SNAKE_CASE )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_SCREAMING_SNAKE_CASE )
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase):
@unittest.skip(reason='Model not available yet' )
def lowerCamelCase ( self ):
"""simple docstring"""
pass
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
_snake_case = tf.constant([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
_snake_case = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_snake_case = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )[0]
_snake_case = tf.constant(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , _SCREAMING_SNAKE_CASE , atol=1E-4 )
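# Hedged aside (added for illustration): the integration check above compares
# only a small slice of the hidden states against hard-coded reference values
# within an absolute tolerance. The same pattern in plain NumPy, with made-up
# numbers:
import numpy as np

_expected = np.array([[0.2356, 0.1948, 0.0369]])
_actual = _expected + 1e-5  # pretend the model output drifts slightly
assert np.allclose(_actual, _expected, atol=1e-4)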
| 355 |
"""Flip images and their YOLO-format bounding boxes for data augmentation."""
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

# Input/output directories are intentionally left as empty placeholders.
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:  # horizontal flip: mirror the x centre
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:  # vertical flip: mirror the y centre
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
| 160 | 0 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file() -> str:
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
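# Hedged round-trip check (added for illustration): each example script is
# expected to dump its final metrics to <output_dir>/all_results.json, which
# get_results() loads so the tests below can assert on individual metrics.
with tempfile.TemporaryDirectory() as _tmp:
    with open(os.path.join(_tmp, "all_results.json"), "w") as _f:
        json.dump({"eval_accuracy": 0.82}, _f)
    assert get_results(_tmp)["eval_accuracy"] == 0.82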
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowerCAmelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = F'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
snake_case_ = get_results(_lowerCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , "glue_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowerCAmelCase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = F'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
snake_case_ = get_results(_lowerCAmelCase )
self.assertLess(result["perplexity"] , 1_0_0 )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , "clm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowerCAmelCase__ ( self : str ) -> Any:
"""simple docstring"""
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = F'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ = get_results(_lowerCAmelCase )
self.assertLess(result["perplexity"] , 4_2 )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , "mlm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowerCAmelCase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
snake_case_ = 7 if get_gpu_count() > 1 else 2
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = F'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ = get_results(_lowerCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertLess(result["train_loss"] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , "ner_no_trainer" ) ) )
@unittest.skip(reason="Fix me @muellerzr" )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowerCAmelCase__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = F'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ = get_results(_lowerCAmelCase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["eval_f1"] , 2_8 )
self.assertGreaterEqual(result["eval_exact"] , 2_8 )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , "qa_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowerCAmelCase__ ( self : Tuple ) -> str:
"""simple docstring"""
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = F'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ = get_results(_lowerCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , "swag_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowerCAmelCase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = F'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ = get_results(_lowerCAmelCase )
self.assertGreaterEqual(result["eval_rouge1"] , 1_0 )
self.assertGreaterEqual(result["eval_rouge2"] , 2 )
self.assertGreaterEqual(result["eval_rougeL"] , 7 )
self.assertGreaterEqual(result["eval_rougeLsum"] , 7 )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , "summarization_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowerCAmelCase__ ( self : Any ) -> List[str]:
"""simple docstring"""
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = F'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case_ = get_results(_lowerCAmelCase )
self.assertGreaterEqual(result["eval_bleu"] , 3_0 )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , "translation_no_trainer" ) ) )
@slow
def lowerCAmelCase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ = logging.StreamHandler(sys.stdout )
logger.addHandler(_lowerCAmelCase )
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = F'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
snake_case_ = get_results(_lowerCAmelCase )
self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowerCAmelCase__ ( self : Dict ) -> int:
"""simple docstring"""
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = F'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
snake_case_ = get_results(_lowerCAmelCase )
# The base model scores a 25%
self.assertGreaterEqual(result["eval_accuracy"] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , "step_1" ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , "image_classification_no_trainer" ) ) )
| 159 |
from __future__ import annotations

import os
from typing import Any

import requests

BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Fetch the authenticated user's GitHub profile with a personal access token."""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
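# Hedged extension sketch (added; not part of the original script): the same
# token header authenticates other endpoints, e.g. the authenticated user's
# repository list. The timeout is an addition -- good practice so a stalled
# request cannot hang forever.
def fetch_github_repos(auth_token: str) -> list:
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(BASE_URL + "/user/repos", headers=headers, timeout=10).json()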
| 159 | 1 |
"""Pigeonhole sort: bucket each value into a hole indexed by (value - min)."""
from __future__ import annotations


def pigeon_sort(array: list[int]) -> list[int]:
    """
    Sort in place and return the array.

    >>> pigeon_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> pigeon_sort([])
    []
    >>> pigeon_sort([-2, -5, -45])
    [-45, -5, -2]
    """
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
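# Hedged complexity aside (added for illustration): pigeonhole sort runs in
# O(n + range) time but allocates O(range) extra memory, so it suits dense
# integer data and wastes space on sparse value ranges:
assert pigeon_sort([10_000, 0]) == [0, 10_000]  # correct, but allocates 10_001 holes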
| 236 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
snake_case_ : List[Any] = None
snake_case_ : Dict = logging.get_logger(__name__)
snake_case_ : Dict = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
snake_case_ : List[str] = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
snake_case_ : str = {
'facebook/nllb-large-en-ro': 1024,
'facebook/nllb-200-distilled-600M': 1024,
}
# fmt: off
snake_case_ : Optional[Any] = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self : List[Any] ,lowerCamelCase__ : Optional[Any]=None ,lowerCamelCase__ : Dict=None ,lowerCamelCase__ : List[Any]="<s>" ,lowerCamelCase__ : Dict="</s>" ,lowerCamelCase__ : List[Any]="</s>" ,lowerCamelCase__ : Union[str, Any]="<s>" ,lowerCamelCase__ : List[Any]="<unk>" ,lowerCamelCase__ : Any="<pad>" ,lowerCamelCase__ : Optional[Any]="<mask>" ,lowerCamelCase__ : Optional[Any]=None ,lowerCamelCase__ : str=None ,lowerCamelCase__ : Tuple=None ,lowerCamelCase__ : Union[str, Any]=False ,**lowerCamelCase__ : Optional[Any] ,):
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it
_UpperCamelCase : Optional[int] = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else mask_token
_UpperCamelCase : Union[str, Any] = legacy_behaviour
super().__init__(
vocab_file=lowerCamelCase__ ,tokenizer_file=lowerCamelCase__ ,bos_token=lowerCamelCase__ ,eos_token=lowerCamelCase__ ,sep_token=lowerCamelCase__ ,cls_token=lowerCamelCase__ ,unk_token=lowerCamelCase__ ,pad_token=lowerCamelCase__ ,mask_token=lowerCamelCase__ ,src_lang=lowerCamelCase__ ,tgt_lang=lowerCamelCase__ ,additional_special_tokens=lowerCamelCase__ ,legacy_behaviour=lowerCamelCase__ ,**lowerCamelCase__ ,)
_UpperCamelCase : int = vocab_file
_UpperCamelCase : int = False if not self.vocab_file else True
_UpperCamelCase : Dict = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
_UpperCamelCase : List[str] = {
lang_code: self.convert_tokens_to_ids(lowerCamelCase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_UpperCamelCase : List[str] = src_lang if src_lang is not None else 'eng_Latn'
_UpperCamelCase : int = self.convert_tokens_to_ids(self._src_lang )
_UpperCamelCase : Dict = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : str ):
'''simple docstring'''
_UpperCamelCase : str = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCamelCase_ ( self : List[str] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase_ ( self : Dict ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
_UpperCamelCase : Dict = [self.sep_token_id]
_UpperCamelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] ,lowerCamelCase__ : Optional[str] ,**lowerCamelCase__ : Dict ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
_UpperCamelCase : Tuple = src_lang
_UpperCamelCase : Optional[Any] = self(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,**lowerCamelCase__ )
_UpperCamelCase : Tuple = self.convert_tokens_to_ids(lowerCamelCase__ )
_UpperCamelCase : str = tgt_lang_id
return inputs
def UpperCamelCase_ ( self : int ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : str = "eng_Latn" ,lowerCamelCase__ : Optional[List[str]] = None ,lowerCamelCase__ : str = "fra_Latn" ,**lowerCamelCase__ : Union[str, Any] ,):
'''simple docstring'''
_UpperCamelCase : Tuple = src_lang
_UpperCamelCase : List[str] = tgt_lang
return super().prepare_seqaseq_batch(lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCamelCase_ ( self : str ,lowerCamelCase__ : List[Any] ):
'''simple docstring'''
_UpperCamelCase : int = self.convert_tokens_to_ids(lowerCamelCase__ )
if self.legacy_behaviour:
_UpperCamelCase : Optional[Any] = []
_UpperCamelCase : int = [self.eos_token_id, self.cur_lang_code]
else:
_UpperCamelCase : List[Any] = [self.cur_lang_code]
_UpperCamelCase : List[Any] = [self.eos_token_id]
_UpperCamelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_UpperCamelCase : int = self.convert_ids_to_tokens(self.suffix_tokens )
_UpperCamelCase : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str ,pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,)
def UpperCamelCase_ ( self : int ,lowerCamelCase__ : str ):
'''simple docstring'''
_UpperCamelCase : Any = self.convert_tokens_to_ids(lowerCamelCase__ )
if self.legacy_behaviour:
_UpperCamelCase : Tuple = []
_UpperCamelCase : str = [self.eos_token_id, self.cur_lang_code]
else:
_UpperCamelCase : Tuple = [self.cur_lang_code]
_UpperCamelCase : Optional[Any] = [self.eos_token_id]
_UpperCamelCase : int = self.convert_ids_to_tokens(self.prefix_tokens )
_UpperCamelCase : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
_UpperCamelCase : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str ,pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,)
def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowerCamelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
_UpperCamelCase : List[Any] = os.path.join(
lowerCamelCase__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ):
copyfile(self.vocab_file ,lowerCamelCase__ )
return (out_vocab_file,)
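# Hedged usage sketch (added; all comments -- the model id and language codes
# come from the constants above, while the generation call is an assumption
# about the surrounding seq2seq API, not something this file defines):
#
#     tokenizer = NllbTokenizerFast.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     inputs = tokenizer("Hello world", return_tensors="pt")
#     # forced_bos_token_id would steer generation into the target language:
#     # model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["fra_Latn"])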
| 236 | 1 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
lowercase__ = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
lowercase__ = {
'allenai/longformer-base-4096': 4096,
'allenai/longformer-large-4096': 4096,
'allenai/longformer-large-4096-finetuned-triviaqa': 4096,
'allenai/longformer-base-4096-extra.pos.embd.only': 4096,
'allenai/longformer-large-4096-extra.pos.embd.only': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Return a mapping from utf-8 bytes to unicode strings, deliberately avoiding
    whitespace/control characters that the BPE code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
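# Hedged self-check (added for illustration): bytes_to_unicode maps every byte
# 0-255 to a printable unicode character so BPE can treat raw bytes as visible
# symbols; printable ASCII maps to itself and the remaining bytes get shifted
# code points.
_b2u = bytes_to_unicode()
assert _b2u[ord("A")] == "A"  # printable bytes are unchanged
assert len(_b2u) == 256  # every byte value gets a character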
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
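# Hedged self-check (added for illustration): get_pairs yields the adjacent
# symbol pairs that the BPE loop ranks against the merge table; the
# lowest-ranked pair is merged first.
assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}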
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , lowercase , lowercase , lowercase="replace" , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=False , **lowercase , ) -> Union[str, Any]:
'''simple docstring'''
a__: Tuple = AddedToken(a_ , lstrip=a_ , rstrip=a_) if isinstance(a_ , a_) else bos_token
a__: List[str] = AddedToken(a_ , lstrip=a_ , rstrip=a_) if isinstance(a_ , a_) else eos_token
a__: Union[str, Any] = AddedToken(a_ , lstrip=a_ , rstrip=a_) if isinstance(a_ , a_) else sep_token
a__: Dict = AddedToken(a_ , lstrip=a_ , rstrip=a_) if isinstance(a_ , a_) else cls_token
a__: Optional[Any] = AddedToken(a_ , lstrip=a_ , rstrip=a_) if isinstance(a_ , a_) else unk_token
a__: List[str] = AddedToken(a_ , lstrip=a_ , rstrip=a_) if isinstance(a_ , a_) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
a__: Any = AddedToken(a_ , lstrip=a_ , rstrip=a_) if isinstance(a_ , a_) else mask_token
super().__init__(
errors=a_ , bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , cls_token=a_ , pad_token=a_ , mask_token=a_ , add_prefix_space=a_ , **a_ , )
with open(a_ , encoding='utf-8') as vocab_handle:
a__: Optional[Any] = json.load(a_)
a__: Tuple = {v: k for k, v in self.encoder.items()}
a__: str = errors # how to handle errors in decoding
a__: Tuple = bytes_to_unicode()
a__: Dict = {v: k for k, v in self.byte_encoder.items()}
with open(a_ , encoding='utf-8') as merges_handle:
a__: List[str] = merges_handle.read().split('\n')[1:-1]
a__: Any = [tuple(merge.split()) for merge in bpe_merges]
a__: List[Any] = dict(zip(a_ , range(len(a_))))
a__: int = {}
a__: int = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
a__: Any = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
@property
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
return len(self.encoder)
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder)
def lowerCamelCase_ ( self , lowercase) -> Optional[int]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
a__: Optional[int] = tuple(a_)
a__: Optional[int] = get_pairs(a_)
if not pairs:
return token
while True:
a__: Optional[int] = min(a_ , key=lambda lowercase: self.bpe_ranks.get(a_ , float('inf')))
if bigram not in self.bpe_ranks:
break
a__: Optional[int] = bigram
a__: Optional[int] = []
a__: Union[str, Any] = 0
while i < len(a_):
try:
a__: Tuple = word.index(a_ , a_)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
a__: List[str] = j
if word[i] == first and i < len(a_) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
a__: str = tuple(a_)
a__: Optional[int] = new_word
if len(a_) == 1:
break
else:
a__: str = get_pairs(a_)
a__: Optional[int] = " ".join(a_)
a__: Optional[int] = word
return word
def lowerCamelCase_ ( self , lowercase) -> Tuple:
'''simple docstring'''
a__: int = []
for token in re.findall(self.pat , a_):
a__: Any = "".join(
self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(a_).split(' '))
return bpe_tokens
def lowerCamelCase_ ( self , lowercase) -> Optional[int]:
'''simple docstring'''
return self.encoder.get(a_ , self.encoder.get(self.unk_token))
def lowerCamelCase_ ( self , lowercase) -> Optional[Any]:
'''simple docstring'''
return self.decoder.get(a_)
def lowerCamelCase_ ( self , lowercase) -> str:
'''simple docstring'''
a__: List[Any] = "".join(a_)
a__: Tuple = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors)
return text
def lowerCamelCase_ ( self , lowercase , lowercase = None) -> str:
'''simple docstring'''
if not os.path.isdir(a_):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
a__: Any = os.path.join(
a_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
a__: Dict = os.path.join(
a_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(a_ , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=a_ , ensure_ascii=a_) + '\n')
a__: int = 0
with open(a_ , 'w' , encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowercase: kv[1]):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!')
a__: List[Any] = token_index
writer.write(' '.join(a_) + '\n')
index += 1
return vocab_file, merge_file
def lowerCamelCase_ ( self , lowercase , lowercase = None) -> Union[str, Any]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a__: Any = [self.cls_token_id]
a__: Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase_ ( self , lowercase , lowercase = None , lowercase = False) -> Optional[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_)
if token_ids_a is None:
return [1] + ([0] * len(a_)) + [1]
return [1] + ([0] * len(a_)) + [1, 1] + ([0] * len(a_)) + [1]
def lowerCamelCase_ ( self , lowercase , lowercase = None) -> List[Any]:
'''simple docstring'''
a__: Union[str, Any] = [self.sep_token_id]
a__: List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def lowerCamelCase_ ( self , lowercase , lowercase=False , **lowercase) -> str:
'''simple docstring'''
a__: List[str] = kwargs.pop('add_prefix_space' , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(a_) > 0 and not text[0].isspace()):
a__: List[Any] = " " + text
return (text, kwargs)
| 290 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
lowercase__ = get_logger()
lowercase__ = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
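# Hedged usage sketch (added; all comments -- the dataset contents are
# illustrative, while with_format("jax") is the public entry point that routes
# through this formatter):
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("jax")
#     ds[0]["x"]  # -> jax.Array placed on the default (or requested) device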
| 241 | 0 |
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = (KDPMaDiscreteScheduler,)
lowercase_ = 10
def lowerCAmelCase_ ( self : Any , **_lowerCAmelCase : str ):
SCREAMING_SNAKE_CASE_ = {
'num_train_timesteps': 1_100,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**_lowerCAmelCase )
return config
def lowerCAmelCase_ ( self : List[Any] ):
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_lowerCAmelCase , beta_end=_lowerCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config(prediction_type='v_prediction' )
SCREAMING_SNAKE_CASE_ = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE_ = sample.to(_lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE_ = scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = output.prev_sample
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(_lowerCAmelCase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_934E-07 ) < 1E-2
assert abs(result_mean.item() - 6.1_112E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.693_428_650_170_972E-07 ) < 1E-2
assert abs(result_mean.item() - 0.0002 ) < 1E-3
def lowerCAmelCase_ ( self : Any ):
if torch_device == "mps":
return
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE_ = sample.to(_lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE_ = scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = output.prev_sample
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(_lowerCAmelCase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
def lowerCAmelCase_ ( self : Tuple ):
if torch_device == "mps":
return
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter.to(_lowerCAmelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE_ = scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = output.prev_sample
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(_lowerCAmelCase ) )
if str(_lowerCAmelCase ).startswith('cpu' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3 | 210 |
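The tests above all drive the same denoising loop: scale the sample, predict noise, step the scheduler. A minimal runnable sketch of that loop, assuming a toy callable in place of the real UNet (`EulerDiscreteScheduler` is an illustrative choice, since the scheduler class under test is not named in this snippet):

```python
# Minimal sketch of the sampling loop exercised above; the scheduler API
# (init_noise_sigma, scale_model_input, step) is the standard diffusers
# interface, the "model" is a toy stand-in for a real UNet.
import torch
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = -0.1 * model_input  # toy stand-in for unet(model_input, t).sample
    sample = scheduler.step(noise_pred, t, sample).prev_sample

print(sample.abs().sum().item(), sample.abs().mean().item())
```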
import cva
import numpy as np
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] , _lowerCAmelCase : float , _lowerCAmelCase : int ):
if k in (0.04, 0.06):
SCREAMING_SNAKE_CASE_ = k
SCREAMING_SNAKE_CASE_ = window_size
else:
raise ValueError('invalid k value' )
def __str__( self : Tuple ):
return str(self.k )
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : str ):
SCREAMING_SNAKE_CASE_ = cva.imread(_lowerCAmelCase , 0 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = img.shape
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = img.copy()
SCREAMING_SNAKE_CASE_ = cva.cvtColor(_lowerCAmelCase , cva.COLOR_GRAY2RGB )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = np.gradient(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = dx**2
SCREAMING_SNAKE_CASE_ = dy**2
SCREAMING_SNAKE_CASE_ = dx * dy
SCREAMING_SNAKE_CASE_ = 0.04
SCREAMING_SNAKE_CASE_ = self.window_size // 2
for y in range(offset , h - offset ):
for x in range(offset , w - offset ):
SCREAMING_SNAKE_CASE_ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE_ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE_ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE_ = (wxx * wyy) - (wxy**2)
SCREAMING_SNAKE_CASE_ = wxx + wyy
SCREAMING_SNAKE_CASE_ = det - k * (trace**2)
# corner response threshold; can be tuned
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
edge_detect = lowerCamelCase_(0.04, 3)
color_img , corner_list = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img) | 210 | 1 |
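As a cross-check, here is a self-contained sketch of the same Harris response computation on a synthetic image, so it runs without an input file; the window offset, `k`, and the 0.5 threshold mirror the illustrative constants used above:

```python
# Standalone sketch of the Harris corner response computed in the class
# above, run on a synthetic white square whose four vertices are corners.
import numpy as np

img = np.zeros((32, 32), dtype=np.float64)
img[8:24, 8:24] = 255.0

dy, dx = np.gradient(img)
ixx, iyy, ixy = dx**2, dy**2, dx * dy

k, offset, corners = 0.04, 1, []
h, w = img.shape
for y in range(offset, h - offset):
    for x in range(offset, w - offset):
        wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
        wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
        wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
        r = (wxx * wyy - wxy**2) - k * (wxx + wyy) ** 2  # Harris response
        if r > 0.5:
            corners.append((x, y, r))

print(f"{len(corners)} corner candidates")
```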
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCAmelCase ( A__ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = KandinskyVaaPriorPipeline
__lowerCAmelCase = ['''prompt''']
__lowerCAmelCase = ['''prompt''', '''negative_prompt''']
__lowerCAmelCase = [
'''num_images_per_prompt''',
'''generator''',
'''num_inference_steps''',
'''latents''',
'''negative_prompt''',
'''guidance_scale''',
'''output_type''',
'''return_dict''',
]
__lowerCAmelCase = False
@property
def A (self : Tuple ):
return 32
@property
def A (self : Union[str, Any] ):
return 32
@property
def A (self : List[str] ):
return self.time_input_dim
@property
def A (self : List[Any] ):
return self.time_input_dim * 4
@property
def A (self : Optional[int] ):
return 100
@property
def A (self : Dict ):
A = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def A (self : Dict ):
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_lowerCAmelCase )
@property
def A (self : Optional[int] ):
torch.manual_seed(0 )
A = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 12,
"""embedding_dim""": self.text_embedder_hidden_size,
"""num_layers""": 1,
}
A = PriorTransformer(**_lowerCAmelCase )
# clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it does not
A = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def A (self : Any ):
torch.manual_seed(0 )
A = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
A = CLIPVisionModelWithProjection(_lowerCAmelCase )
return model
@property
def A (self : Union[str, Any] ):
A = CLIPImageProcessor(
crop_size=224 , do_center_crop=_lowerCAmelCase , do_normalize=_lowerCAmelCase , do_resize=_lowerCAmelCase , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=224 , )
return image_processor
def A (self : int ):
A = self.dummy_prior
A = self.dummy_image_encoder
A = self.dummy_text_encoder
A = self.dummy_tokenizer
A = self.dummy_image_processor
A = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=_lowerCAmelCase , clip_sample_range=10.0 , )
A = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""scheduler""": scheduler,
"""image_processor""": image_processor,
}
return components
def A (self : str , _lowerCAmelCase : List[str] , _lowerCAmelCase : str=0 ):
if str(_lowerCAmelCase ).startswith("""mps""" ):
A = torch.manual_seed(_lowerCAmelCase )
else:
A = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
A = {
"""prompt""": """horse""",
"""generator""": generator,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def A (self : Dict ):
A = """cpu"""
A = self.get_dummy_components()
A = self.pipeline_class(**_lowerCAmelCase )
A = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
A = pipe(**self.get_dummy_inputs(_lowerCAmelCase ) )
A = output.image_embeds
A = pipe(
**self.get_dummy_inputs(_lowerCAmelCase ) , return_dict=_lowerCAmelCase , )[0]
A = image[0, -10:]
A = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
A = np.array(
[-0.0_532, 1.7_120, 0.3_656, -1.0_852, -0.8_946, -1.1_756, 0.4_348, 0.2_482, 0.5_146, -0.1_156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def A (self : Optional[Any] ):
A = torch_device == """cpu"""
A = True
A = False
self._test_inference_batch_single_identical(
test_max_difference=_lowerCAmelCase , relax_max_difference=_lowerCAmelCase , test_mean_pixel_difference=_lowerCAmelCase , )
@skip_mps
def A (self : Optional[int] ):
A = torch_device == """cpu"""
A = False
self._test_attention_slicing_forward_pass(
test_max_difference=_lowerCAmelCase , test_mean_pixel_difference=_lowerCAmelCase , )
| 258 |
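The `get_dummy_inputs` helper above seeds its generator differently on Apple silicon; a small standalone sketch of that device-aware pattern, assuming only stock PyTorch:

```python
# Sketch of the seeding pattern used in get_dummy_inputs above:
# torch.Generator did not support the "mps" device in older torch
# releases, so tests fall back to the global CPU generator there.
import torch

def seeded_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # returns the (seeded) default generator
    return torch.Generator(device=device).manual_seed(seed)

gen = seeded_generator("cpu", seed=0)
print(torch.randn(2, generator=gen))
```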
'''simple docstring'''
import datasets
from .evaluate import evaluate
_lowerCamelCase : List[str] = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_lowerCamelCase : List[Any] = '\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_lowerCamelCase : Dict = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def A (self : int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
def A (self : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ):
A = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
A = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
A = evaluate(dataset=_lowerCAmelCase , predictions=_lowerCAmelCase )
return score
| 258 | 1 |
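Most of the work in `compute` above is reshaping flat records into the nested layout the official CUAD `evaluate` script expects; a pure-Python sketch of that reshaping with toy records:

```python
# Sketch of the reshaping done in compute() above: flat reference records
# are nested into the CUAD/SQuAD paragraphs/qas layout, and predictions
# are keyed by question id.
references = [
    {"id": "q1", "answers": {"text": ["The seller:"], "answer_start": [143]}},
]
predictions = [{"id": "q1", "prediction_text": ["The seller:"]}]

pred_dict = {p["id"]: p["prediction_text"] for p in predictions}
dataset = [
    {
        "paragraphs": [
            {
                "qas": [
                    {
                        "answers": [{"text": t} for t in ref["answers"]["text"]],
                        "id": ref["id"],
                    }
                    for ref in references
                ]
            }
        ]
    }
]
print(pred_dict, dataset[0]["paragraphs"][0]["qas"][0]["id"])
```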
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
lowerCAmelCase__ : Optional[int] ='''sshleifer/mar_enro_6_3_student'''
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
def _A ( self ):
'''simple docstring'''
super().setUp()
__SCREAMING_SNAKE_CASE = cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' , extract_compressed_file=_A , )
__SCREAMING_SNAKE_CASE = f"""{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"""
@slow
@require_torch_gpu
def _A ( self ):
'''simple docstring'''
MarianMTModel.from_pretrained(_A )
@slow
@require_torch_gpu
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
__SCREAMING_SNAKE_CASE = (self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
__SCREAMING_SNAKE_CASE = bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
for k, v in env_vars_to_replace.items():
__SCREAMING_SNAKE_CASE = bash_script.replace(_A , str(_A ) )
__SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
__SCREAMING_SNAKE_CASE = f"""
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
""".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
__SCREAMING_SNAKE_CASE = ['finetune.py'] + bash_script.split() + args
with patch.object(_A , 'argv' , _A ):
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
__SCREAMING_SNAKE_CASE = pl.Trainer.add_argparse_args(_A )
__SCREAMING_SNAKE_CASE = SummarizationModule.add_model_specific_args(_A , os.getcwd() )
__SCREAMING_SNAKE_CASE = parser.parse_args()
__SCREAMING_SNAKE_CASE = main(_A )
# Check metrics
__SCREAMING_SNAKE_CASE = load_json(model.metrics_save_path )
__SCREAMING_SNAKE_CASE = metrics['val'][0]
__SCREAMING_SNAKE_CASE = metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , _A )
self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.0_1 )
# guard against the model hanging on generate, e.g. if a bad config was saved (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
__SCREAMING_SNAKE_CASE = os.listdir(_A )
__SCREAMING_SNAKE_CASE = [x for x in contents if x.endswith('.ckpt' )][0]
__SCREAMING_SNAKE_CASE = os.path.join(args.output_dir , _A )
__SCREAMING_SNAKE_CASE = torch.load(_A , map_location='cpu' )
__SCREAMING_SNAKE_CASE = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
__SCREAMING_SNAKE_CASE = {os.path.basename(_A ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = f"""{self.test_file_dir_str}/test_data/wmt_en_ro"""
__SCREAMING_SNAKE_CASE = {
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
__SCREAMING_SNAKE_CASE = (
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
__SCREAMING_SNAKE_CASE = bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
__SCREAMING_SNAKE_CASE = bash_script.replace('--fp16 ' , ' ' )
for k, v in env_vars_to_replace.items():
__SCREAMING_SNAKE_CASE = bash_script.replace(_A , str(_A ) )
__SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
__SCREAMING_SNAKE_CASE = bash_script.replace('--fp16' , '' )
__SCREAMING_SNAKE_CASE = 6
__SCREAMING_SNAKE_CASE = (
['distillation.py']
+ bash_script.split()
+ [
f"""--output_dir={output_dir}""",
'--gpus=1',
'--learning_rate=1e-3',
f"""--num_train_epochs={epochs}""",
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(_A , 'argv' , _A ):
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
__SCREAMING_SNAKE_CASE = pl.Trainer.add_argparse_args(_A )
__SCREAMING_SNAKE_CASE = SummarizationDistiller.add_model_specific_args(_A , os.getcwd() )
__SCREAMING_SNAKE_CASE = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
__SCREAMING_SNAKE_CASE = distill_main(_A )
# Check metrics
__SCREAMING_SNAKE_CASE = load_json(model.metrics_save_path )
__SCREAMING_SNAKE_CASE = metrics['val'][0]
__SCREAMING_SNAKE_CASE = metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.0_1
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # fails if the model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , _A )
# check lightning ckpt can be loaded and has a reasonable statedict
__SCREAMING_SNAKE_CASE = os.listdir(_A )
__SCREAMING_SNAKE_CASE = [x for x in contents if x.endswith('.ckpt' )][0]
__SCREAMING_SNAKE_CASE = os.path.join(args.output_dir , _A )
__SCREAMING_SNAKE_CASE = torch.load(_A , map_location='cpu' )
__SCREAMING_SNAKE_CASE = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
__SCREAMING_SNAKE_CASE = {os.path.basename(_A ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
| 118 |
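Both tests above patch a checked-in bash training script by plain string substitution before handing the tokens to argparse; a minimal sketch of that step (the placeholder names are the ones used above, the script body here is invented for illustration):

```python
# Sketch of the placeholder-substitution step both tests perform before
# building sys.argv: each key in env_vars_to_replace is replaced with
# its value inside the raw bash snippet, then the result is tokenized.
bash_script = "--data_dir $ENRO_DIR --train_batch_size=$BS --max_source_length $MAX_LEN"
env_vars_to_replace = {"$MAX_LEN": 128, "$BS": 16, "$ENRO_DIR": "/tmp/wmt_en_ro"}

for key, value in env_vars_to_replace.items():
    bash_script = bash_script.replace(key, str(value))

argv = ["distillation.py"] + bash_script.split()
print(argv)
```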
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
lowerCAmelCase__ : Optional[int] =True
from torch.cuda.amp import autocast
lowerCAmelCase__ : List[Any] =logging.getLogger(__name__)
@dataclass
class UpperCAmelCase_ :
'''simple docstring'''
UpperCamelCase__ : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCamelCase__ : Optional[str] = field(
default=UpperCamelCase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCamelCase__ : Optional[bool] = field(
default=UpperCamelCase_ , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
UpperCamelCase__ : Optional[bool] = field(
default=UpperCamelCase_ , metadata={'''help''': '''Whether to log verbose messages or not.'''} , )
UpperCamelCase__ : Optional[float] = field(
default=2.0 , metadata={'''help''': '''Maximum temperature for gumbel softmax.'''} )
UpperCamelCase__ : Optional[float] = field(
default=0.5 , metadata={'''help''': '''Minimum temperature for gumbel softmax.'''} )
UpperCamelCase__ : Optional[float] = field(
default=0.9_9_9_9_9_5 , metadata={'''help''': '''Decay of gumbel temperature during training.'''} )
def __lowercase ( a__ , a__ ) -> Dict:
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
__SCREAMING_SNAKE_CASE = logging.WARNING
if model_args.verbose_logging:
__SCREAMING_SNAKE_CASE = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
__SCREAMING_SNAKE_CASE = logging.INFO
logger.setLevel(a__ )
@dataclass
class UpperCAmelCase_ :
'''simple docstring'''
UpperCamelCase__ : str = field(
default=UpperCamelCase_ , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
UpperCamelCase__ : Optional[str] = field(
default=UpperCamelCase_ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
UpperCamelCase__ : Optional[str] = field(
default='''train''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
UpperCamelCase__ : Optional[str] = field(
default='''validation''' , metadata={
'''help''': (
'''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''''
)
} , )
UpperCamelCase__ : Optional[str] = field(
default='''file''' , metadata={'''help''': '''Column in the dataset that contains speech file path. Defaults to \'file\''''} , )
UpperCamelCase__ : bool = field(
default=UpperCamelCase_ , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
UpperCamelCase__ : Optional[int] = field(
default=1 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
UpperCamelCase__ : Optional[int] = field(
default=UpperCamelCase_ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
UpperCamelCase__ : Optional[float] = field(
default=2_0.0 , metadata={'''help''': '''Filter audio files that are longer than `max_duration_in_seconds` seconds'''} )
@dataclass
class UpperCAmelCase_ :
'''simple docstring'''
UpperCamelCase__ : WavaVecaForPreTraining
UpperCamelCase__ : WavaVecaFeatureExtractor
UpperCamelCase__ : Union[bool, str] = "longest"
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : Optional[int] = None
def __call__( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.feature_extractor.pad(
_A , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
__SCREAMING_SNAKE_CASE = self.model._get_feat_extract_output_lengths(batch['input_values'].shape[-1] )
__SCREAMING_SNAKE_CASE = batch['input_values'].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
__SCREAMING_SNAKE_CASE = self.model._get_feat_extract_output_lengths(batch['attention_mask'].sum(-1 ) ).to(
torch.long )
__SCREAMING_SNAKE_CASE = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['input_values'].device )
# these two operations make sure that all values
# before the output length indices are attended to
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
__SCREAMING_SNAKE_CASE = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=_A , min_masks=2 , )
return batch
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
def __init__( self , *_A , _A=1 , _A=0 , _A=1.0 , **_A ):
'''simple docstring'''
super().__init__(*_A , **_A )
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = max_gumbel_temp
__SCREAMING_SNAKE_CASE = min_gumbel_temp
__SCREAMING_SNAKE_CASE = gumbel_temp_decay
def _A ( self , _A , _A ):
'''simple docstring'''
model.train()
__SCREAMING_SNAKE_CASE = self._prepare_inputs(_A )
if self.use_amp:
with autocast():
__SCREAMING_SNAKE_CASE = self.compute_loss(_A , _A )
else:
__SCREAMING_SNAKE_CASE = self.compute_loss(_A , _A )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
__SCREAMING_SNAKE_CASE = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
__SCREAMING_SNAKE_CASE = loss.sum() / (inputs['mask_time_indices']).sum()
else:
raise ValueError(f"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
__SCREAMING_SNAKE_CASE = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(_A ).backward()
elif self.use_apex:
with amp.scale_loss(_A , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(_A )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
def __lowercase ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
configure_logger(a__ , a__ )
# Downloading and loading a dataset from the hub.
__SCREAMING_SNAKE_CASE = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain"
__SCREAMING_SNAKE_CASE = DatasetDict()
__SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"""{data_args.train_split_name}[:{data_args.validation_split_percentage}%]""" , cache_dir=model_args.cache_dir , )
__SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"""{data_args.train_split_name}[{data_args.validation_split_percentage}%:]""" , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain"
__SCREAMING_SNAKE_CASE = DatasetDict()
__SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split='validation' , cache_dir=model_args.cache_dir , )
__SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"""{data_args.train_split_name}""" , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
__SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=a__ )
def prepare_dataset(a__ ):
# check that all files have the correct sampling rate
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
__SCREAMING_SNAKE_CASE = datasets.map(
a__ , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['train'].column_names )
# filter audio files that are too long
__SCREAMING_SNAKE_CASE = vectorized_datasets.filter(
lambda a__ : len(data['speech'] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(a__ ):
return feature_extractor(batch['speech'] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
__SCREAMING_SNAKE_CASE = vectorized_datasets.map(
a__ , batched=a__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['train'].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
__SCREAMING_SNAKE_CASE = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
'PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'
' ``config.feat_extract_norm=\'layer\'``' )
__SCREAMING_SNAKE_CASE = WavaVecaForPreTraining(a__ )
__SCREAMING_SNAKE_CASE = DataCollatorForWavaVecaPretraining(model=a__ , feature_extractor=a__ )
__SCREAMING_SNAKE_CASE = WavaVecaPreTrainer(
model=a__ , data_collator=a__ , args=a__ , train_dataset=vectorized_datasets['train'] , eval_dataset=vectorized_datasets['validation'] , tokenizer=a__ , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 118 | 1 |
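The trainer above anneals the quantizer's Gumbel-softmax temperature once per update; a standalone sketch of that schedule, using the default hyperparameters declared in `ModelArguments`:

```python
# Sketch of the Gumbel-softmax temperature schedule applied after every
# update in training_step above: exponential decay from max_gumbel_temp,
# floored at min_gumbel_temp. Defaults match the ModelArguments fields.
max_gumbel_temp, min_gumbel_temp, gumbel_temp_decay = 2.0, 0.5, 0.999995

def gumbel_temperature(num_update_step: int) -> float:
    return max(max_gumbel_temp * gumbel_temp_decay**num_update_step, min_gumbel_temp)

for step in (0, 100_000, 1_000_000):
    print(step, round(gumbel_temperature(step), 4))
```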
"""simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = XLMProphetNetTokenizer
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = True
def _snake_case ( self ) -> str:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase = XLMProphetNetTokenizer(lowercase , keep_accents=lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = """[PAD]"""
lowerCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase ) , lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase ) , lowercase )
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """[PAD]""" )
self.assertEqual(vocab_keys[1] , """[CLS]""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(lowercase ) , 1_012 )
def _snake_case ( self ) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 1_012 )
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = XLMProphetNetTokenizer(lowercase , keep_accents=lowercase )
lowerCAmelCase = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowercase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCAmelCase = tokenizer.convert_tokens_to_ids(lowercase )
self.assertListEqual(
lowercase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
lowerCAmelCase = tokenizer.convert_ids_to_tokens(lowercase )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""[UNK]""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""[UNK]""",
""".""",
] , )
@cached_property
def _snake_case ( self ) -> Dict:
return XLMProphetNetTokenizer.from_pretrained("""microsoft/xprophetnet-large-wiki100-cased""" )
@slow
def _snake_case ( self ) -> int:
lowerCAmelCase = """Hello World!"""
lowerCAmelCase = [35_389, 6_672, 49, 2]
self.assertListEqual(lowercase , self.big_tokenizer.encode(lowercase ) )
@slow
def _snake_case ( self ) -> Any:
# fmt: off
lowerCAmelCase = {"""input_ids""": [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase , model_name="""microsoft/xprophetnet-large-wiki100-cased""" , revision="""1acad1643ddd54a44df6a1b797ada8373685d90e""" , )
| 46 |
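The id checks above add `tokenizer.fairseq_offset` to every raw SentencePiece id; a pure-Python sketch of that offset mapping (the offset value here is illustrative, not the real model's):

```python
# Sketch of the fairseq-offset convention tested above: raw SentencePiece
# ids are shifted by a fixed offset so that special tokens ([PAD], [CLS],
# ...) can occupy the low id range. No fixture model is needed here.
fairseq_offset = 10  # illustrative value, not the actual model's offset
sp_ids = [285, 46, 10, 170, 382]  # raw SentencePiece ids for "This is a test"

token_ids = [i + fairseq_offset for i in sp_ids]
recovered = [i - fairseq_offset for i in token_ids]
assert recovered == sp_ids
print(token_ids)
```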
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
lowercase : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
lowercase : list[int] = [ord(letter) for letter in string.ascii_lowercase]
lowercase : set[int] = {ord(char) for char in VALID_CHARS}
lowercase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> str | None:
_snake_case = ""
_snake_case = 42
_snake_case = 42
_snake_case = 42
for keychar, cipherchar in zip(cycle(__A ) , __A ):
_snake_case = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(__A )
return decoded
def SCREAMING_SNAKE_CASE__ ( __A ) -> list[str]:
_snake_case = []
for key in product(__A , repeat=3 ):
_snake_case = try_key(__A , __A )
if encoded is not None:
possibles.append(__A )
return possibles
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> list[str]:
return [possible for possible in possibles if common_word in possible.lower()]
def SCREAMING_SNAKE_CASE__ ( __A = "p059_cipher.txt" ) -> int:
_snake_case = 42
_snake_case = 42
_snake_case = 42
_snake_case = 42
_snake_case = Path(__A ).parent.joinpath(__A ).read_text(encoding='utf-8' )
_snake_case = [int(__A ) for number in data.strip().split(',' )]
_snake_case = filter_valid_chars(__A )
for common_word in COMMON_WORDS:
_snake_case = filter_common_word(__A , __A )
if len(__A ) == 1:
break
_snake_case = possibles[0]
return sum(ord(__A ) for char in decoded_text )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 42 | 0 |
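A self-contained sketch of the same XOR brute force on a toy example, so it runs without the `p059_cipher.txt` input file:

```python
# Encrypt with a known 3-letter lowercase key, then recover the plaintext
# by trying every lowercase key and keeping the first fully-printable
# decoding that contains the common word "the", as the solution above does.
from itertools import cycle, product
import string

plaintext = "the quick brown fox"
key = "abc"
cipher = [ord(p) ^ ord(k) for p, k in zip(plaintext, cycle(key))]

valid = {ord(c) for c in string.ascii_letters + string.digits + string.punctuation + string.whitespace}
for candidate in product(string.ascii_lowercase, repeat=3):
    decoded = [c ^ ord(k) for c, k in zip(cipher, cycle(candidate))]
    if all(d in valid for d in decoded) and "the" in bytes(decoded).decode():
        print("".join(candidate), bytes(decoded).decode())
        break
```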
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class A__ ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : str = BarthezTokenizer
UpperCamelCase_ : List[Any] = BarthezTokenizerFast
UpperCamelCase_ : Optional[int] = True
UpperCamelCase_ : Optional[int] = True
def _lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
super().setUp()
_UpperCAmelCase : Tuple = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=lowerCAmelCase__ )
_UpperCAmelCase : List[str] = tokenizer
def _lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Tuple = "<pad>"
_UpperCAmelCase : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def _lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(lowerCAmelCase__ ) , 1_0_1_1_2_2 )
def _lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2 )
@require_torch
def _lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
_UpperCAmelCase : int = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_UpperCAmelCase : Optional[int] = [0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2]
_UpperCAmelCase : int = self.tokenizer(
lowerCAmelCase__ , max_length=len(lowerCAmelCase__ ) , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="pt" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
_UpperCAmelCase : str = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def _lowerCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_UpperCAmelCase : Optional[int] = self.get_tokenizer()
_UpperCAmelCase : Optional[int] = self.get_rust_tokenizer()
_UpperCAmelCase : Tuple = "I was born in 92000, and this is falsé."
_UpperCAmelCase : Dict = tokenizer.tokenize(lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : Dict = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer()
_UpperCAmelCase : Optional[Any] = tokenizer.encode(lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def _lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = {"input_ids": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a French model, so we also use French texts.
_UpperCAmelCase : Tuple = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=lowerCAmelCase__ , ) | 17 |
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"files", [
["full:README.md", "dataset_infos.json"],
["empty:README.md", "dataset_infos.json"],
["dataset_infos.json"],
["full:README.md"],
], )
def __UpperCAmelCase ( a_: Tuple, a_: Any ):
_UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("dset_infos_dir" )
if "full:README.md" in files:
with open(dataset_infos_dir / "README.md", "w" ) as f:
f.write("---\ndataset_info:\n dataset_size: 42\n---" )
if "empty:README.md" in files:
with open(dataset_infos_dir / "README.md", "w" ) as f:
f.write("" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / "dataset_infos.json", "w" ) as f:
f.write("{\"default\": {\"dataset_size\": 42}}" )
_UpperCAmelCase : List[str] = DatasetInfosDict.from_directory(a_ )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"dataset_info", [
DatasetInfo(),
DatasetInfo(
description="foo", features=Features({"a": Value("int32" )} ), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42, ),
], )
def __UpperCAmelCase ( a_: Union[str, Any], a_: DatasetInfo ):
_UpperCAmelCase : Tuple = str(a_ )
dataset_info.write_to_directory(a_ )
_UpperCAmelCase : Any = DatasetInfo.from_directory(a_ )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(a_, "dataset_info.json" ) )
def __UpperCAmelCase ( ):
_UpperCAmelCase : Optional[int] = DatasetInfo(
description="foo", citation="bar", homepage="https://foo.bar", license="CC0", features=Features({"a": Value("int32" )} ), post_processed={}, supervised_keys=(), task_templates=[], builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train", "num_examples": 42}], download_checksums={}, download_size=1_337, post_processing_size=442, dataset_size=1_234, size_in_bytes=1_337 + 442 + 1_234, )
_UpperCAmelCase : Tuple = dataset_info._to_yaml_dict()
assert sorted(a_ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str) )
_UpperCAmelCase : List[Any] = yaml.safe_dump(a_ )
_UpperCAmelCase : Optional[int] = yaml.safe_load(a_ )
assert dataset_info_yaml_dict == reloaded
def __UpperCAmelCase ( ):
_UpperCAmelCase : str = DatasetInfo()
_UpperCAmelCase : List[str] = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"dataset_infos_dict", [
DatasetInfosDict(),
DatasetInfosDict({"default": DatasetInfo()} ),
DatasetInfosDict({"my_config_name": DatasetInfo()} ),
DatasetInfosDict(
{
"default": DatasetInfo(
description="foo", features=Features({"a": Value("int32" )} ), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42, )
} ),
DatasetInfosDict(
{
"v1": DatasetInfo(dataset_size=42 ),
"v2": DatasetInfo(dataset_size=1_337 ),
} ),
], )
def __UpperCAmelCase ( a_: str, a_: DatasetInfosDict ):
_UpperCAmelCase : Union[str, Any] = str(a_ )
dataset_infos_dict.write_to_directory(a_ )
_UpperCAmelCase : Union[str, Any] = DatasetInfosDict.from_directory(a_ )
# the config_name key of the dataset_infos_dict takes precedence over the DatasetInfo attribute
for config_name, dataset_info in dataset_infos_dict.items():
_UpperCAmelCase : Optional[int] = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
_UpperCAmelCase : List[str] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(a_, "README.md" ) ) | 17 | 1 |
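The round trips above reduce to dump/load symmetry; a minimal sketch using plain PyYAML instead of `datasets.DatasetInfo`, so it runs standalone:

```python
# Minimal sketch of the YAML round trip the tests above verify: dump a
# dict of dataset info to YAML, load it back, and check equality.
import yaml

info = {"dataset_size": 42, "splits": [{"name": "train", "num_examples": 42}]}
dumped = yaml.safe_dump(info)
reloaded = yaml.safe_load(dumped)
assert reloaded == info
print(dumped)
```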
'''simple docstring'''
import math
import os
import sys
def lowerCAmelCase (__A):
"""simple docstring"""
_a = ''''''
try:
with open(__a , '''rb''') as binary_file:
_a = binary_file.read()
for dat in data:
_a = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('''File not accessible''')
sys.exit()
def lowerCAmelCase (__A , __A , __A , __A):
"""simple docstring"""
lexicon.pop(__a)
_a = last_match_id
if math.loga(__a).is_integer():
for curr_key in lexicon:
_a = '''0''' + lexicon[curr_key]
_a = bin(__a)[2:]
def lowerCAmelCase (__A):
"""simple docstring"""
_a = {'''0''': '''0''', '''1''': '''1'''}
_a , _a = '''''', ''''''
_a = len(__a)
for i in range(len(__a)):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
_a = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(__a , __a , __a , __a)
index += 1
_a = ''''''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
_a = lexicon[curr_string]
result += last_match_id
return result
def lowerCAmelCase (__A , __A):
"""simple docstring"""
_a = os.path.getsize(__a)
_a = bin(__a)[2:]
_a = len(__a)
return "0" * (length_length - 1) + file_length_binary + compressed
def lowerCAmelCase (__A , __A):
"""simple docstring"""
_a = 8
try:
with open(__a , '''wb''') as opened_file:
_a = [
to_write[i : i + byte_length]
for i in range(0 , len(__a) , __a)
]
if len(result_byte_array[-1]) % byte_length == 0:
result_byte_array.append('''10000000''')
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1]) - 1
)
for elem in result_byte_array:
opened_file.write(int(__a , 2).to_bytes(1 , byteorder='''big'''))
except OSError:
print('''File not accessible''')
sys.exit()
def lowerCAmelCase (__A , __A):
"""simple docstring"""
_a = read_file_binary(__a)
_a = compress_data(__a)
_a = add_file_length(__a , __a)
write_file_binary(__a , __a)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 211 |
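The trickiest part of `write_file_binary` above is padding the final byte; a standalone sketch of that packing step on a short bit string:

```python
# Sketch of the byte-packing in write_file_binary above: cut the bit
# string into 8-bit chunks, pad the last chunk with a "1" marker followed
# by zeros (or append a whole padding byte if it is already full), then
# emit each chunk as one big-endian byte.
to_write = "0101100111"  # example bit string
byte_length = 8

chunks = [to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)]
if len(chunks[-1]) % byte_length == 0:
    chunks.append("10000000")  # whole extra padding byte
else:
    chunks[-1] += "1" + "0" * (byte_length - len(chunks[-1]) - 1)

packed = b"".join(int(c, 2).to_bytes(1, byteorder="big") for c in chunks)
print(chunks, packed.hex())
```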
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A:
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE_=[2, 2, 3, 2] , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=["stage2", "stage3", "stage4"] , SCREAMING_SNAKE_CASE_=[2, 3, 4] , SCREAMING_SNAKE_CASE_=None , ):
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = image_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = num_stages
UpperCamelCase__ = hidden_sizes
UpperCamelCase__ = depths
UpperCamelCase__ = is_training
UpperCamelCase__ = use_labels
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = num_labels
UpperCamelCase__ = initializer_range
UpperCamelCase__ = out_features
UpperCamelCase__ = out_indices
UpperCamelCase__ = scope
def UpperCAmelCase_ (self ):
UpperCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase__ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ (self ):
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = ConvNextVaModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = ConvNextVaForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = ConvNextVaBackbone(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCamelCase__ = None
UpperCamelCase__ = ConvNextVaBackbone(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class __A( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
def UpperCAmelCase_ (self ):
UpperCamelCase__ = ConvNextVaModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCAmelCase_ (self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase_ (self ):
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def UpperCAmelCase_ (self ):
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def UpperCAmelCase_ (self ):
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def UpperCAmelCase_ (self ):
pass
def UpperCAmelCase_ (self ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCamelCase__ = True
if model_class.__name__ in [
*get_values(SCREAMING_SNAKE_CASE_ ),
*get_values(SCREAMING_SNAKE_CASE_ ),
]:
continue
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
UpperCamelCase__ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def UpperCAmelCase_ (self ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCamelCase__ = False
UpperCamelCase__ = True
if (
model_class.__name__
in [*get_values(SCREAMING_SNAKE_CASE_ ), *get_values(SCREAMING_SNAKE_CASE_ )]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.gradient_checkpointing_enable()
model.train()
UpperCamelCase__ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict, so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
UpperCamelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase__ = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def UpperCAmelCase_ (self ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ = ConvNextVaModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def __magic_name__ ( ):
'''simple docstring'''
UpperCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __A( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ (self ):
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def UpperCAmelCase_ (self ):
UpperCamelCase__ = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = preprocessor(images=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase__ = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
UpperCamelCase__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = torch.tensor([0.9996, 0.1966, -0.4386] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
| 244 | 0 |
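The row above exercises ConvNeXt V2 both as a backbone and as an image classifier. For orientation, here is a minimal inference sketch against the same checkpoint the slow test loads (`facebook/convnextv2-tiny-1k-224`); the un-obfuscated class names (`ConvNextV2ForImageClassification`, `AutoImageProcessor`) are the standard `transformers` ones, and the image path mirrors the fixture used above.

import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextV2ForImageClassification

# Same checkpoint as the slow integration test above.
checkpoint = "facebook/convnextv2-tiny-1k-224"
processor = AutoImageProcessor.from_pretrained(checkpoint)
model = ConvNextV2ForImageClassification.from_pretrained(checkpoint)
model.eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000) for the ImageNet-1k head
print(model.config.id2label[logits.argmax(-1).item()])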
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case ( unittest.TestCase):
def __init__( self : List[Any] , a__ : List[str] , a__ : Dict=13 , a__ : Optional[Any]=3 , a__ : str=2_24 , a__ : Optional[int]=30 , a__ : Optional[Any]=4_00 , a__ : Union[str, Any]=True , a__ : Any=None , a__ : str=True , a__ : List[Any]=[0.5, 0.5, 0.5] , a__ : List[str]=[0.5, 0.5, 0.5] , ) -> List[Any]:
'''simple docstring'''
_A = size if size is not None else {"height": 18, "width": 18}
_A = parent
_A = batch_size
_A = num_channels
_A = image_size
_A = min_resolution
_A = max_resolution
_A = do_resize
_A = size
_A = do_normalize
_A = image_mean
_A = image_std
def a_ ( self : List[str] ) -> Tuple:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class snake_case ( _UpperCamelCase , unittest.TestCase):
__UpperCamelCase = ViTImageProcessor if is_vision_available() else None
def a_ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
_A = EfficientFormerImageProcessorTester(self )
@property
def a_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def a_ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a__ , "image_mean" ) )
self.assertTrue(hasattr(a__ , "image_std" ) )
self.assertTrue(hasattr(a__ , "do_normalize" ) )
self.assertTrue(hasattr(a__ , "do_resize" ) )
self.assertTrue(hasattr(a__ , "size" ) )
def a_ ( self : str ) -> List[str]:
'''simple docstring'''
pass
def a_ ( self : int ) -> List[Any]:
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_proc_tester , equal_resolution=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , Image.Image )
# Test non-batched input
_A = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
_A = image_processor(a__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def a_ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A = prepare_image_inputs(self.image_proc_tester , equal_resolution=a__ , numpify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , np.ndarray )
# Test non-batched input
_A = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
_A = image_processor(a__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def a_ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A = prepare_image_inputs(self.image_proc_tester , equal_resolution=a__ , torchify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , torch.Tensor )
# Test non-batched input
_A = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
_A = image_processor(a__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , ) | 163 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {"vocab_file": "spm_char.model"}
a_ = {
"vocab_file": {
"microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
"microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
"microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
}
}
a_ = {
"microsoft/speecht5_asr": 10_24,
"microsoft/speecht5_tts": 10_24,
"microsoft/speecht5_vc": 10_24,
}
class snake_case ( _UpperCamelCase):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['input_ids', 'attention_mask']
def __init__( self : Any , a__ : List[Any] , a__ : Optional[int]="<s>" , a__ : List[Any]="</s>" , a__ : int="<unk>" , a__ : Any="<pad>" , a__ : Optional[Dict[str, Any]] = None , **a__ : str , ) -> None:
'''simple docstring'''
_A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a__ , eos_token=a__ , unk_token=a__ , pad_token=a__ , sp_model_kwargs=self.sp_model_kwargs , **a__ , )
_A = vocab_file
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a__ )
@property
def a_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
return self.sp_model.get_piece_size()
def a_ ( self : int ) -> Tuple:
'''simple docstring'''
_A = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
_A = self.__dict__.copy()
_A = None
return state
def __setstate__( self : Optional[Any] , a__ : Any ) -> List[str]:
'''simple docstring'''
_A = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_A = {}
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a_ ( self : Any , a__ : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(a__ , out_type=a__ )
def a_ ( self : Optional[Any] , a__ : Optional[int] ) -> Dict:
'''simple docstring'''
return self.sp_model.piece_to_id(a__ )
def a_ ( self : List[str] , a__ : str ) -> Union[str, Any]:
'''simple docstring'''
_A = self.sp_model.IdToPiece(a__ )
return token
def a_ ( self : Optional[int] , a__ : Union[str, Any] ) -> str:
'''simple docstring'''
_A = []
_A = ""
for token in tokens:
# make sure that special tokens are not decoded using the sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(a__ ) + token
_A = []
else:
current_sub_tokens.append(a__ )
out_string += self.sp_model.decode(a__ )
return out_string.strip()
def a_ ( self : str , a__ : Dict , a__ : Dict=None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def a_ ( self : Any , a__ : List[int] , a__ : Optional[List[int]] = None , a__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
_A = [1]
if token_ids_a is None:
return ([0] * len(a__ )) + suffix_ones
return ([0] * len(a__ )) + ([0] * len(a__ )) + suffix_ones
def a_ ( self : str , a__ : str , a__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(a__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_A = os.path.join(
a__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a__ )
elif not os.path.isfile(self.vocab_file ):
with open(a__ , "wb" ) as fi:
_A = self.sp_model.serialized_model_proto()
fi.write(a__ )
return (out_vocab_file,) | 163 | 1 |
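The decode loop in the tokenizer above is hard to read through the placeholder names. Below is a readable sketch of the same logic, assuming `sp_model` is a loaded `SentencePieceProcessor` and `all_special_tokens` is a list of strings.

# Readable equivalent of the token-to-string loop above: special tokens
# bypass the SentencePiece model; everything else is buffered and decoded
# in one call so multi-piece words come out intact.
def convert_tokens_to_string(tokens, sp_model, all_special_tokens):
    current_sub_tokens = []
    out_string = ""
    for token in tokens:
        if token in all_special_tokens:
            out_string += sp_model.decode(current_sub_tokens) + token
            current_sub_tokens = []
        else:
            current_sub_tokens.append(token)
    out_string += sp_model.decode(current_sub_tokens)
    return out_string.strip()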
"""simple docstring"""
from __future__ import annotations
a = [
[-1, 0], # up (row - 1)
[0, -1], # left (column - 1)
[1, 0], # down (row + 1)
[0, 1], # right (column + 1)
]
def lowercase (snake_case__ : list[list[int]] , snake_case__ : list[int] , snake_case__ : list[int] , snake_case__ : int , snake_case__ : list[list[int]] , ) -> tuple[list[list[int]], list[list[int]]]:
'''simple docstring'''
lowerCAmelCase = [
[0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) )
] # the reference grid
lowerCAmelCase = 1
lowerCAmelCase = [
[0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) )
] # the action grid
lowerCAmelCase = init[0]
lowerCAmelCase = init[1]
lowerCAmelCase = 0
lowerCAmelCase = g + heuristic[x][y] # f = g + h: cost so far plus the heuristic estimate to the goal
lowerCAmelCase = [[f, g, x, y]]
lowerCAmelCase = False # flag that is set when search is complete
lowerCAmelCase = False # flag set if we cannot expand any further
while not found and not resign:
if len(snake_case__ ) == 0:
raise ValueError("""Algorithm is unable to find a solution""" )
else: # to choose the least costly action so as to move closer to the goal
cell.sort()
cell.reverse()
lowerCAmelCase = cell.pop()
lowerCAmelCase = next_cell[2]
lowerCAmelCase = next_cell[3]
lowerCAmelCase = next_cell[1]
if x == goal[0] and y == goal[1]:
lowerCAmelCase = True
else:
for i in range(len(snake_case__ ) ): # to try out different valid actions
lowerCAmelCase = x + DIRECTIONS[i][0]
lowerCAmelCase = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(snake_case__ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
lowerCAmelCase = g + cost
lowerCAmelCase = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
lowerCAmelCase = 1
lowerCAmelCase = i
lowerCAmelCase = []
lowerCAmelCase = goal[0]
lowerCAmelCase = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
lowerCAmelCase = x - DIRECTIONS[action[x][y]][0]
lowerCAmelCase = y - DIRECTIONS[action[x][y]][1]
lowerCAmelCase = xa
lowerCAmelCase = ya
invpath.append([x, y] )
lowerCAmelCase = []
for i in range(len(snake_case__ ) ):
path.append(invpath[len(snake_case__ ) - 1 - i] )
return path, action
if __name__ == "__main__":
a = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
a = [0, 0]
# all coordinates are given in [row, column] format
a = [len(grid) - 1, len(grid[0]) - 1]
a = 1
# the cost map which pushes the path closer to the goal
a = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
a = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
a = 9_9
a , a = search(grid, init, goal, cost, heuristic)
print('ACTION MAP')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 155 |
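The search above implements A* with a manually sorted open list and an `action` grid for path reconstruction. Here is a compact sketch of the same algorithm using `heapq` and a parent map, under the same conventions (0 = free cell, 1 = obstacle, unit move cost, Manhattan-distance heuristic).

# Compact A* sketch over the same grid conventions as the cell above.
import heapq

def astar(grid, start, goal):
    start, goal = tuple(start), tuple(goal)

    def h(x, y):  # Manhattan distance to the goal
        return abs(x - goal[0]) + abs(y - goal[1])

    open_heap = [(h(*start), 0, start)]  # entries: (f, g, cell)
    parent = {start: None}
    g_cost = {start: 0}
    while open_heap:
        _, g, node = heapq.heappop(open_heap)
        if node == goal:
            path = []
            while node is not None:  # walk the parent map back to the start
                path.append(list(node))
                node = parent[node]
            return path[::-1]
        x, y = node
        for dx, dy in ((-1, 0), (0, -1), (1, 0), (0, 1)):
            nx, ny = x + dx, y + dy
            if 0 <= nx < len(grid) and 0 <= ny < len(grid[0]) and grid[nx][ny] == 0:
                if g + 1 < g_cost.get((nx, ny), float("inf")):
                    g_cost[(nx, ny)] = g + 1
                    parent[(nx, ny)] = node
                    heapq.heappush(open_heap, (g + 1 + h(nx, ny), g + 1, (nx, ny)))
    raise ValueError("Algorithm is unable to find a solution")

`astar(grid, init, goal)` returns the path as a list of `[row, column]` cells, matching what the driver above prints.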
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 155 | 1 |
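The cell above is the standard lazy-import scaffold: `_import_structure` names what the package exports, and `_LazyModule` defers the heavy imports until an attribute is first touched. A minimal illustration of that idea follows; this is a sketch, not the actual `transformers._LazyModule` implementation.

# Minimal sketch of the deferred-import idea: attribute access triggers
# the real import, so importing the package itself stays cheap.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # maps exported attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__} has no attribute {attr}")
        submodule = importlib.import_module(f".{self._attr_to_module[attr]}", self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ only runs once per name
        return value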
'''simple docstring'''
from __future__ import annotations
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : List[str] , UpperCamelCase__ : str ):
"""simple docstring"""
UpperCamelCase = order
# a_{0} ... a_{k}
UpperCamelCase = [1.0] + [0.0] * order
# b_{0} ... b_{k}
UpperCamelCase = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
UpperCamelCase = [0.0] * self.order
# y[n-1] ... y[n-k]
UpperCamelCase = [0.0] * self.order
def A ( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple ):
"""simple docstring"""
if len(snake_case__ ) < self.order:
UpperCamelCase = [1.0, *a_coeffs]
if len(snake_case__ ) != self.order + 1:
UpperCamelCase = (
f"""Expected a_coeffs to have {self.order + 1} elements """
f"""for {self.order}-order filter, got {len(snake_case__ )}"""
)
raise ValueError(snake_case__ )
if len(snake_case__ ) != self.order + 1:
UpperCamelCase = (
f"""Expected b_coeffs to have {self.order + 1} elements """
f"""for {self.order}-order filter, got {len(snake_case__ )}"""
)
raise ValueError(snake_case__ )
UpperCamelCase = a_coeffs
UpperCamelCase = b_coeffs
def A ( self : List[Any] , UpperCamelCase__ : Any ):
"""simple docstring"""
UpperCamelCase = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
UpperCamelCase = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
UpperCamelCase = self.input_history[:-1]
UpperCamelCase = self.output_history[:-1]
UpperCamelCase = sample
UpperCamelCase = result
return result
| 353 |
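The class above is an order-k IIR filter computing y[n] = (b[0]*x[n] + sum over i of (b[i]*x[n-i] - a[i]*y[n-i])) / a[0]. Its name and methods are obfuscated in this dump; the usage sketch below assumes the un-obfuscated names from the original implementation (`IIRFilter`, `set_coefficients`, `process`, with keyword names taken from the error strings above), and the coefficient values are illustrative placeholders rather than a designed filter.

# Usage sketch, assuming the un-obfuscated class/method names; the
# coefficients are placeholders, not a designed biquad.
filt = IIRFilter(2)
filt.set_coefficients(
    a_coeffs=[1.0, -1.1430, 0.4128],    # feedback taps a_0..a_2 (example values)
    b_coeffs=[0.0675, 0.1349, 0.0675],  # feedforward taps b_0..b_2 (example values)
)
impulse = [1.0, 0.0, 0.0, 0.0, 0.0]
response = [filt.process(s) for s in impulse]  # the filter's impulse response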
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def __lowerCamelCase ( A__ , A__ , A__ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')
UpperCamelCase = (
('layer.', 'layer_'),
('word_embeddings.weight', 'word_embeddings'),
('position_embeddings.weight', 'position_embeddings'),
('token_type_embeddings.weight', 'token_type_embeddings'),
('.', '/'),
('LayerNorm/weight', 'LayerNorm/gamma'),
('LayerNorm/bias', 'LayerNorm/beta'),
('weight', 'kernel'),
)
if not os.path.isdir(A__ ):
os.makedirs(A__ )
UpperCamelCase = model.state_dict()
def to_tf_var_name(A__ ):
for patt, repl in iter(A__ ):
UpperCamelCase = name.replace(A__ , A__ )
return F"""bert/{name}"""
def create_tf_var(A__ , A__ , A__ ):
UpperCamelCase = tf.dtypes.as_dtype(tensor.dtype )
UpperCamelCase = tf.get_variable(dtype=A__ , shape=tensor.shape , name=A__ , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(A__ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
UpperCamelCase = to_tf_var_name(A__ )
UpperCamelCase = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
UpperCamelCase = torch_tensor.T
UpperCamelCase = create_tf_var(tensor=A__ , name=A__ , session=A__ )
tf.keras.backend.set_value(A__ , A__ )
UpperCamelCase = session.run(A__ )
print(F"""Successfully created {tf_name}: {np.allclose(A__ , A__ )}""" )
UpperCamelCase = tf.train.Saver(tf.trainable_variables() )
saver.save(A__ , os.path.join(A__ , model_name.replace('-' , '_' ) + '.ckpt' ) )
def __lowerCamelCase ( A__=None ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=A__ , required=A__ , help='model name e.g. bert-base-uncased' )
parser.add_argument(
'--cache_dir' , type=A__ , default=A__ , required=A__ , help='Directory containing pytorch model' )
parser.add_argument('--pytorch_model_path' , type=A__ , required=A__ , help='/path/to/<pytorch-model-name>.bin' )
parser.add_argument('--tf_cache_dir' , type=A__ , required=A__ , help='Directory in which to save tensorflow model' )
UpperCamelCase = parser.parse_args(A__ )
UpperCamelCase = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=A__ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 249 | 0 |
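The converter above hinges on one detail worth calling out: PyTorch `nn.Linear` stores weights as `(out_features, in_features)`, while TensorFlow dense kernels are `(in_features, out_features)`, so any tensor whose name matches the dense/attention patterns is transposed on the way over. A standalone sketch of that rule:

# Sketch of the transpose rule used in the conversion above.
import numpy as np

TENSORS_TO_TRANSPOSE = ("dense.weight", "attention.self.query",
                        "attention.self.key", "attention.self.value")

def to_tf_tensor(var_name: str, torch_tensor) -> np.ndarray:
    array = torch_tensor.numpy()
    if any(pattern in var_name for pattern in TENSORS_TO_TRANSPOSE):
        array = array.T  # (out, in) -> (in, out) for TF dense kernels
    return array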
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : torch.FloatTensor
class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
@register_to_config
def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : int = 1_6 ,SCREAMING_SNAKE_CASE__ : int = 8_8 ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : int = 1 ,SCREAMING_SNAKE_CASE__ : float = 0.0 ,SCREAMING_SNAKE_CASE__ : int = 3_2 ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : str = "geglu" ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : bool = True ,):
super().__init__()
__lowerCamelCase : List[str] = num_attention_heads
__lowerCamelCase : str = attention_head_dim
__lowerCamelCase : Union[str, Any] = num_attention_heads * attention_head_dim
__lowerCamelCase : Optional[Any] = in_channels
__lowerCamelCase : Tuple = torch.nn.GroupNorm(num_groups=SCREAMING_SNAKE_CASE__ ,num_channels=SCREAMING_SNAKE_CASE__ ,eps=1E-6 ,affine=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = nn.Linear(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
# 3. Define transformers blocks
__lowerCamelCase : List[Any] = nn.ModuleList(
[
BasicTransformerBlock(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,dropout=SCREAMING_SNAKE_CASE__ ,cross_attention_dim=SCREAMING_SNAKE_CASE__ ,activation_fn=SCREAMING_SNAKE_CASE__ ,attention_bias=SCREAMING_SNAKE_CASE__ ,double_self_attention=SCREAMING_SNAKE_CASE__ ,norm_elementwise_affine=SCREAMING_SNAKE_CASE__ ,)
for d in range(SCREAMING_SNAKE_CASE__)
])
__lowerCamelCase : List[str] = nn.Linear(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Any=None ,SCREAMING_SNAKE_CASE__ : List[Any]=None ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ,SCREAMING_SNAKE_CASE__ : Optional[int]=1 ,SCREAMING_SNAKE_CASE__ : str=None ,SCREAMING_SNAKE_CASE__ : bool = True ,):
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Any = hidden_states.shape
__lowerCamelCase : List[str] = batch_frames // num_frames
__lowerCamelCase : int = hidden_states
__lowerCamelCase : Optional[int] = hidden_states[None, :].reshape(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : int = hidden_states.permute(0 ,2 ,1 ,3 ,4)
__lowerCamelCase : Any = self.norm(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = hidden_states.permute(0 ,3 ,4 ,2 ,1).reshape(batch_size * height * width ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = self.proj_in(SCREAMING_SNAKE_CASE__)
# 2. Blocks
for block in self.transformer_blocks:
__lowerCamelCase : Union[str, Any] = block(
SCREAMING_SNAKE_CASE__ ,encoder_hidden_states=SCREAMING_SNAKE_CASE__ ,timestep=SCREAMING_SNAKE_CASE__ ,cross_attention_kwargs=SCREAMING_SNAKE_CASE__ ,class_labels=SCREAMING_SNAKE_CASE__ ,)
# 3. Output
__lowerCamelCase : Optional[Any] = self.proj_out(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = (
hidden_states[None, None, :]
.reshape(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
.permute(0 ,3 ,4 ,1 ,2)
.contiguous()
)
__lowerCamelCase : List[str] = hidden_states.reshape(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=SCREAMING_SNAKE_CASE__)
| 73 |
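The forward pass above treats the frame axis as the sequence dimension: the `(batch * frames, C, H, W)` input is refolded so every spatial position becomes a length-`frames` sequence for the transformer blocks, then restored. A standalone tensor sketch of that round-trip, using the same permutes as the code above:

# Standalone sketch of the reshape round-trip in the forward pass above.
import torch

batch_frames, channels, height, width = 8, 4, 16, 16
num_frames = 4
batch_size = batch_frames // num_frames
x = torch.randn(batch_frames, channels, height, width)

# (b*f, c, h, w) -> (b, f, c, h, w) -> (b, c, f, h, w)
h5 = x.reshape(batch_size, num_frames, channels, height, width).permute(0, 2, 1, 3, 4)
# (b, c, f, h, w) -> (b, h, w, f, c) -> (b*h*w, f, c):
# every spatial position becomes a length-f sequence over the frame axis
seq = h5.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channels)
# ... the temporal transformer blocks operate on `seq` here ...
# inverse: (b*h*w, f, c) -> (b, h, w, f, c) -> (b, f, c, h, w) -> (b*f, c, h, w)
restored = (
    seq.reshape(batch_size, height, width, num_frames, channels)
    .permute(0, 3, 4, 1, 2)
    .reshape(batch_frames, channels, height, width)
)
assert torch.equal(restored, x)  # the round-trip is exact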
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
UpperCamelCase__ : List[str] = logging.get_logger(__name__)
UpperCamelCase__ : Tuple = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
UpperCamelCase__ : Tuple = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
UpperCamelCase__ : List[Any] = {
'''allenai/led-base-16384''': 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowerCAmelCase_ ( ):
__SCREAMING_SNAKE_CASE : Tuple = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
__SCREAMING_SNAKE_CASE : Any = bs[:]
__SCREAMING_SNAKE_CASE : Tuple = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_lowerCamelCase )
cs.append(2**8 + n )
n += 1
__SCREAMING_SNAKE_CASE : Union[str, Any] = [chr(_lowerCamelCase ) for n in cs]
return dict(zip(_lowerCamelCase , _lowerCamelCase ) )
def lowerCAmelCase_ ( _lowerCamelCase: Optional[int] ):
__SCREAMING_SNAKE_CASE : Dict = set()
__SCREAMING_SNAKE_CASE : Optional[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__SCREAMING_SNAKE_CASE : str = char
return pairs
class _UpperCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
_A : Union[str, Any] = VOCAB_FILES_NAMES
_A : Any = PRETRAINED_VOCAB_FILES_MAP
_A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : Optional[Any] = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any]="replace" , lowerCAmelCase__ : Dict="<s>" , lowerCAmelCase__ : List[str]="</s>" , lowerCAmelCase__ : Tuple="</s>" , lowerCAmelCase__ : Tuple="<s>" , lowerCAmelCase__ : Union[str, Any]="<unk>" , lowerCAmelCase__ : Union[str, Any]="<pad>" , lowerCAmelCase__ : int="<mask>" , lowerCAmelCase__ : str=False , **lowerCAmelCase__ : int , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else bos_token
__SCREAMING_SNAKE_CASE : List[str] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else eos_token
__SCREAMING_SNAKE_CASE : List[str] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else sep_token
__SCREAMING_SNAKE_CASE : Dict = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else cls_token
__SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token
__SCREAMING_SNAKE_CASE : Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else pad_token
# Mask token behaves like a normal word, i.e. it includes the space before it
__SCREAMING_SNAKE_CASE : Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
super().__init__(
errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , )
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as vocab_handle:
__SCREAMING_SNAKE_CASE : str = json.load(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = {v: k for k, v in self.encoder.items()}
__SCREAMING_SNAKE_CASE : Dict = errors # how to handle errors in decoding
__SCREAMING_SNAKE_CASE : Union[str, Any] = bytes_to_unicode()
__SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as merges_handle:
__SCREAMING_SNAKE_CASE : Optional[Any] = merges_handle.read().split("""\n""" )[1:-1]
__SCREAMING_SNAKE_CASE : int = [tuple(merge.split() ) for merge in bpe_merges]
__SCREAMING_SNAKE_CASE : Optional[int] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
__SCREAMING_SNAKE_CASE : int = {}
__SCREAMING_SNAKE_CASE : Any = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__SCREAMING_SNAKE_CASE : str = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def UpperCamelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
return len(self.encoder )
def UpperCamelCase__ ( self : Any ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase__ ( self : Dict , lowerCAmelCase__ : Any ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
__SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = get_pairs(lowerCAmelCase__ )
if not pairs:
return token
while True:
__SCREAMING_SNAKE_CASE : Union[str, Any] = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = bigram
__SCREAMING_SNAKE_CASE : List[Any] = []
__SCREAMING_SNAKE_CASE : Optional[int] = 0
while i < len(lowerCAmelCase__ ):
try:
__SCREAMING_SNAKE_CASE : Dict = word.index(lowerCAmelCase__ , lowerCAmelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__SCREAMING_SNAKE_CASE : Dict = j
if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__SCREAMING_SNAKE_CASE : Tuple = tuple(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = new_word
if len(lowerCAmelCase__ ) == 1:
break
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = get_pairs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = """ """.join(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = word
return word
def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = []
for token in re.findall(self.pat , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Any = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__ ).split(""" """ ) )
return bpe_tokens
def UpperCamelCase__ ( self : List[str] , lowerCAmelCase__ : List[str] ):
"""simple docstring"""
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token ) )
def UpperCamelCase__ ( self : int , lowerCAmelCase__ : Optional[int] ):
"""simple docstring"""
return self.decoder.get(lowerCAmelCase__ )
def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = """""".join(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def UpperCamelCase__ ( self : List[str] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__SCREAMING_SNAKE_CASE : int = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ ) + """\n""" )
__SCREAMING_SNAKE_CASE : Tuple = 0
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase__ : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""" )
__SCREAMING_SNAKE_CASE : List[Any] = token_index
writer.write(""" """.join(lowerCAmelCase__ ) + """\n""" )
index += 1
return vocab_file, merge_file
def UpperCamelCase__ ( self : str , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__SCREAMING_SNAKE_CASE : List[Any] = [self.cls_token_id]
__SCREAMING_SNAKE_CASE : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase__ ( self : Any , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def UpperCamelCase__ ( self : Any , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = [self.sep_token_id]
__SCREAMING_SNAKE_CASE : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any]=False , **lowerCAmelCase__ : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
__SCREAMING_SNAKE_CASE : int = """ """ + text
return (text, kwargs)
def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[bool] = None , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = super()._pad(
encoded_inputs=lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding_strategy=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , )
# Load from model defaults
if return_attention_mask is None:
__SCREAMING_SNAKE_CASE : Tuple = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__SCREAMING_SNAKE_CASE : str = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as other (sequential) inputs.
__SCREAMING_SNAKE_CASE : str = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCAmelCase__ )
if needs_to_be_padded:
__SCREAMING_SNAKE_CASE : Dict = len(lowerCAmelCase__ ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
__SCREAMING_SNAKE_CASE : Dict = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs | 112 | 0 |
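The `bpe` method in the tokenizer above repeatedly merges the adjacent pair with the best (lowest) rank until no ranked pair remains. A toy run with a hand-made rank table shows the mechanics without the byte-to-unicode mapping; real ranks come from `merges.txt`.

# Toy illustration of the greedy BPE merge loop above
# (lower rank = merged earlier).
def get_pairs(word):
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}

bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1, ("e", "r"): 2}
word = ("l", "o", "w", "e", "r")
while True:
    pairs = get_pairs(word)
    candidates = [p for p in pairs if p in bpe_ranks]
    if not candidates:
        break
    first, second = min(candidates, key=bpe_ranks.get)  # best-ranked pair
    merged, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
            merged.append(first + second)
            i += 2
        else:
            merged.append(word[i])
            i += 1
    word = tuple(merged)
print(" ".join(word))  # -> "low er"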
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool:
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def _a ( _SCREAMING_SNAKE_CASE ) -> list[str]:
snake_case_ = []
snake_case_ = 11
snake_case_ = int("""1""" + """0""" * digit_len )
for num in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
solutions.append(f"""{num}/{den}""" )
den += 1
num += 1
snake_case_ = 10
return solutions
def _a ( _SCREAMING_SNAKE_CASE = 2 ) -> int:
snake_case_ = 1.0
for fraction in fraction_list(_SCREAMING_SNAKE_CASE ):
snake_case_ = Fraction(_SCREAMING_SNAKE_CASE )
result *= frac.denominator / frac.numerator
return int(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(solution())
| 365 |
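The cell above solves the digit-cancelling-fractions problem (Project Euler 33). A worked check of the predicate on the classic example 49/98, where "cancelling" the shared digit 9 leaves 4/8 with the same value:

# Worked check of the is_digit_cancelling predicate above on 49/98.
num, den = 49, 98
assert num % 10 == den // 10                   # shared digit: 9
assert (num // 10) / (den % 10) == num / den   # 4/8 == 49/98 == 0.5

The four non-trivial two-digit fractions are 16/64, 19/95, 26/65 and 49/98; their product reduces to 1/100, so `solution()` returns 100.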
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __A (snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: List[Any] = CycleDiffusionPipeline
__lowercase: int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"""negative_prompt""",
"""height""",
"""width""",
"""negative_prompt_embeds""",
}
__lowercase: str = PipelineTesterMixin.required_optional_params - {"""latents"""}
__lowercase: Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""source_prompt"""})
__lowercase: Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
__lowercase: Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase ( self : Dict ) ->Any:
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
snake_case_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , num_train_timesteps=1_000 , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
torch.manual_seed(0 )
snake_case_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
snake_case_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
snake_case_ = CLIPTextModel(UpperCAmelCase_ )
snake_case_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCAmelCase ( self : int , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str]=0 ) ->str:
"""simple docstring"""
snake_case_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
snake_case_ = image / 2 + 0.5
if str(UpperCAmelCase_ ).startswith("""mps""" ):
snake_case_ = torch.manual_seed(UpperCAmelCase_ )
else:
snake_case_ = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
snake_case_ = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase ( self : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = CycleDiffusionPipeline(**UpperCAmelCase_ )
snake_case_ = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
snake_case_ = self.get_dummy_inputs(UpperCAmelCase_ )
snake_case_ = pipe(**UpperCAmelCase_ )
snake_case_ = output.images
snake_case_ = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
snake_case_ = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def lowerCAmelCase ( self : Union[str, Any] ) ->str:
"""simple docstring"""
snake_case_ = self.get_dummy_components()
for name, module in components.items():
if hasattr(UpperCAmelCase_ , """half""" ):
snake_case_ = module.half()
snake_case_ = CycleDiffusionPipeline(**UpperCAmelCase_ )
snake_case_ = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
snake_case_ = self.get_dummy_inputs(UpperCAmelCase_ )
snake_case_ = pipe(**UpperCAmelCase_ )
snake_case_ = output.images
snake_case_ = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
snake_case_ = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def lowerCAmelCase ( self : Tuple ) ->Optional[int]:
"""simple docstring"""
return super().test_save_load_local()
@unittest.skip("""non-deterministic pipeline""" )
def lowerCAmelCase ( self : int ) ->Optional[int]:
"""simple docstring"""
return super().test_inference_batch_single_identical()
@skip_mps
def lowerCAmelCase ( self : Tuple ) ->List[Any]:
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def lowerCAmelCase ( self : Any ) ->Tuple:
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : List[Any] ) ->Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : Optional[int] ) ->str:
"""simple docstring"""
snake_case_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
snake_case_ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
snake_case_ = init_image.resize((512, 512) )
snake_case_ = """CompVis/stable-diffusion-v1-4"""
snake_case_ = DDIMScheduler.from_pretrained(UpperCAmelCase_ , subfolder="""scheduler""" )
snake_case_ = CycleDiffusionPipeline.from_pretrained(
UpperCAmelCase_ , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , torch_dtype=torch.floataa , revision="""fp16""" )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe.enable_attention_slicing()
snake_case_ = """A black colored car"""
snake_case_ = """A blue colored car"""
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=UpperCAmelCase_ , source_prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=UpperCAmelCase_ , output_type="""np""" , )
snake_case_ = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def lowerCAmelCase ( self : Tuple ) ->List[str]:
"""simple docstring"""
snake_case_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
snake_case_ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
snake_case_ = init_image.resize((512, 512) )
snake_case_ = """CompVis/stable-diffusion-v1-4"""
snake_case_ = DDIMScheduler.from_pretrained(UpperCAmelCase_ , subfolder="""scheduler""" )
snake_case_ = CycleDiffusionPipeline.from_pretrained(UpperCAmelCase_ , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe.enable_attention_slicing()
snake_case_ = """A black colored car"""
snake_case_ = """A blue colored car"""
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=UpperCAmelCase_ , source_prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=UpperCAmelCase_ , output_type="""np""" , )
snake_case_ = output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 233 | 0 |
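Condensed from the slow test above, a minimal CycleDiffusion usage sketch: same checkpoint, scheduler, image and guidance settings. The prompt/source assignment follows the expected "blue colored car" output; treat this as an illustrative sketch rather than canonical usage.

# Minimal CycleDiffusion usage, condensed from the slow test above.
import torch
from diffusers import CycleDiffusionPipeline, DDIMScheduler
from diffusers.utils import load_image

model_id = "CompVis/stable-diffusion-v1-4"
scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = CycleDiffusionPipeline.from_pretrained(
    model_id, scheduler=scheduler, safety_checker=None
).to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/cycle-diffusion/black_colored_car.png"
).resize((512, 512))

image = pipe(
    prompt="A blue colored car",
    source_prompt="A black colored car",
    image=init_image,
    num_inference_steps=100,
    eta=0.1,
    strength=0.85,
    guidance_scale=3,
    source_guidance_scale=1,
    generator=torch.manual_seed(0),
).images[0]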
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 46 |
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
A = logging.get_logger(__name__)
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=None , _UpperCAmelCase=None ):
if not conversation_id:
__a : List[Any] = uuid.uuida()
if past_user_inputs is None:
__a : Tuple = []
if generated_responses is None:
__a : Dict = []
__a : uuid.UUID = conversation_id
__a : List[str] = past_user_inputs
__a : List[str] = generated_responses
__a : Optional[str] = text
def __eq__( self , _UpperCAmelCase ):
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = False ):
if self.new_user_input:
if overwrite:
logger.warning(
f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
f"""with: \"{text}\".""" )
__a : Any = text
else:
logger.warning(
f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
f"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
else:
__a : List[str] = text
def _lowerCamelCase ( self ):
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__a : Any = None
def _lowerCamelCase ( self , _UpperCAmelCase ):
self.generated_responses.append(_UpperCAmelCase )
def _lowerCamelCase ( self ):
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ):
__a : Any = f"""Conversation id: {self.uuid} \n"""
for is_user, text in self.iter_texts():
__a : str = '''user''' if is_user else '''bot'''
output += f"""{name} >> {text} \n"""
return output
@add_end_docstrings(
_UpperCamelCase , R'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
if self.tokenizer.pad_token_id is None:
__a : List[Any] = self.tokenizer.eos_token
def _lowerCamelCase ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ):
__a : str = {}
__a : List[Any] = {}
__a : int = {}
if min_length_for_response is not None:
__a : Dict = min_length_for_response
if minimum_tokens is not None:
__a : Union[str, Any] = minimum_tokens
if "max_length" in generate_kwargs:
__a : Tuple = generate_kwargs['''max_length''']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__a : Tuple = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(_UpperCAmelCase )
return preprocess_params, forward_params, postprocess_params
def __call__( self , _UpperCAmelCase , _UpperCAmelCase=0 , **_UpperCAmelCase ):
__a : Optional[Any] = super().__call__(_UpperCAmelCase , num_workers=_UpperCAmelCase , **_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) == 1:
return outputs[0]
return outputs
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase=32 ):
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError('''ConversationalPipeline expects Conversation objects as inputs''' )
if conversation.new_user_input is None:
raise ValueError(
f"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
'''Add user inputs with the conversation\'s `add_user_input` method''' )
if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ):
__a : Tuple = self.tokenizer._build_conversation_input_ids(_UpperCAmelCase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__a : List[str] = self._legacy_parse_and_tokenize(_UpperCAmelCase )
if self.framework == "pt":
__a : List[Any] = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__a : List[Any] = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase=10 , **_UpperCAmelCase ):
__a : List[Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length )
__a : Tuple = model_inputs['''input_ids'''].shape[1]
if max_length - minimum_tokens < n:
logger.warning(f"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
__a : str = max_length - minimum_tokens
__a : str = model_inputs['''input_ids'''][:, -trim:]
if "attention_mask" in model_inputs:
__a : Any = model_inputs['''attention_mask'''][:, -trim:]
__a : Optional[Any] = model_inputs.pop('''conversation''' )
__a : Union[str, Any] = max_length
__a : Dict = self.model.generate(**_UpperCAmelCase , **_UpperCAmelCase )
if self.model.config.is_encoder_decoder:
__a : Optional[int] = 1
else:
__a : Tuple = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase=True ):
__a : Dict = model_outputs['''output_ids''']
__a : Dict = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase , )
__a : Optional[int] = model_outputs['''conversation''']
conversation.mark_processed()
conversation.append_response(_UpperCAmelCase )
return conversation
def _lowerCamelCase ( self , _UpperCAmelCase ):
__a : Optional[Any] = self.tokenizer.eos_token_id
__a : int = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
if len(_UpperCAmelCase ) > self.tokenizer.model_max_length:
__a : int = input_ids[-self.tokenizer.model_max_length :]
return input_ids | 160 | 0 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowercase : Any = False
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self :int ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self :Optional[int] ) -> Union[str, Any]:
__UpperCamelCase : Union[str, Any] = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__UpperCamelCase : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
__UpperCamelCase : Any = torch.manual_seed(0 )
__UpperCamelCase : Union[str, Any] = pipe.dual_guided(
prompt="first prompt" , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a )
__UpperCamelCase : Optional[Any] = VersatileDiffusionPipeline.from_pretrained(a , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__UpperCamelCase : str = generator.manual_seed(0 )
__UpperCamelCase : Optional[Any] = pipe.dual_guided(
prompt="first prompt" , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def _lowerCamelCase ( self :List[Any] ) -> Optional[Any]:
__UpperCamelCase : Optional[int] = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__UpperCamelCase : str = "cyberpunk 2077"
__UpperCamelCase : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
__UpperCamelCase : Any = torch.manual_seed(0 )
__UpperCamelCase : Any = pipe.dual_guided(
prompt=a , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=5_0 , output_type="numpy" , ).images
__UpperCamelCase : str = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__UpperCamelCase : str = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__UpperCamelCase : Dict = "A painting of a squirrel eating a burger "
__UpperCamelCase : Any = torch.manual_seed(0 )
__UpperCamelCase : Tuple = pipe.text_to_image(
prompt=a , generator=a , guidance_scale=7.5 , num_inference_steps=5_0 , output_type="numpy" ).images
__UpperCamelCase : List[str] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__UpperCamelCase : Any = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__UpperCamelCase : int = pipe.image_variation(a , generator=a , output_type="numpy" ).images
__UpperCamelCase : Tuple = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__UpperCamelCase : str = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 | 371 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
_A = 42
_A = 42
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self :Optional[Any] , a :int ) -> Tuple:
__UpperCamelCase : list[list[Edge]] = [[] for _ in range(a )]
__UpperCamelCase : str = size
def __getitem__( self :str , a :int ) -> Iterator[Edge]:
return iter(self._graph[vertex] )
@property
def _lowerCamelCase ( self :Any ) -> List[str]:
return self._size
def _lowerCamelCase ( self :Dict , a :int , a :int , a :int ) -> Any:
if weight not in (0, 1):
raise ValueError("Edge weight must be either 0 or 1." )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError("Vertex indexes must be in [0; size)." )
self._graph[from_vertex].append(Edge(a , a ) )
def _lowerCamelCase ( self :List[str] , a :int , a :int ) -> int | None:
__UpperCamelCase : Union[str, Any] = deque([start_vertex] )
__UpperCamelCase : list[int | None] = [None] * self.size
__UpperCamelCase : Dict = 0
while queue:
__UpperCamelCase : Tuple = queue.popleft()
__UpperCamelCase : int = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
__UpperCamelCase : Optional[Any] = current_distance + edge.weight
__UpperCamelCase : Dict = distances[edge.destination_vertex]
if (
isinstance(a , a )
and new_distance >= dest_vertex_distance
):
continue
__UpperCamelCase : Optional[Any] = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError("No path from start_vertex to finish_vertex." )
return distances[finish_vertex]
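# Hedged usage sketch (not part of the original file). The class above is a
# 0-1 BFS: zero-weight edges are pushed to the front of the deque and
# one-weight edges to the back, so vertices leave the deque in nondecreasing
# distance order and the search runs in O(V + E) instead of Dijkstra's
# O(E log V). The names below are assumed de-obfuscated equivalents of the
# anonymized identifiers above.
#
# graph = AdjacencyList(3)
# graph.add_edge(from_vertex=0, to_vertex=1, weight=0)
# graph.add_edge(from_vertex=1, to_vertex=2, weight=1)
# assert graph.get_shortest_path(start_vertex=0, finish_vertex=2) == 1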
if __name__ == "__main__":
import doctest
doctest.testmod() | 151 | 0 |
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase, lowerCamelCase=1e-12 ):
lowercase :Any = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(lowerCamelCase, axis=1 ), a_min=lowerCamelCase ) ).T
lowercase :Tuple = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(lowerCamelCase, axis=1 ), a_min=lowerCamelCase ) ).T
return jnp.matmul(lowerCamelCase, norm_emb_a.T )
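# Hedged note (not part of the original file): the helper above computes an
# L2-normalized cosine-similarity matrix. Each row of both embedding batches
# is divided by its norm (clipped at eps to avoid division by zero), and the
# matmul with the transpose yields entry [i, j] = cos(emb_1[i], emb_2[j]).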
class __lowerCAmelCase ( nn.Module):
_a = 42
_a = jnp.floataa
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
lowercase :Dict = FlaxCLIPVisionModule(self.config.vision_config )
lowercase :List[Any] = nn.Dense(self.config.projection_dim , use_bias=_lowerCAmelCase , dtype=self.dtype )
lowercase :List[str] = self.param("concept_embeds" , jax.nn.initializers.ones , (17, self.config.projection_dim) )
lowercase :List[str] = self.param(
"special_care_embeds" , jax.nn.initializers.ones , (3, self.config.projection_dim) )
lowercase :int = self.param("concept_embeds_weights" , jax.nn.initializers.ones , (17,) )
lowercase :Optional[Any] = self.param("special_care_embeds_weights" , jax.nn.initializers.ones , (3,) )
def __call__( self: Optional[Any] , _lowerCAmelCase: Optional[Any] ):
lowercase :List[str] = self.vision_model(_lowerCAmelCase )[1]
lowercase :Dict = self.visual_projection(_lowerCAmelCase )
lowercase :Optional[Any] = jax_cosine_distance(_lowerCAmelCase , self.special_care_embeds )
lowercase :int = jax_cosine_distance(_lowerCAmelCase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
lowercase :List[Any] = 0.0
lowercase :Optional[Any] = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
lowercase :str = jnp.round(_lowerCAmelCase , 3 )
lowercase :Optional[Any] = jnp.any(special_scores > 0 , axis=1 , keepdims=_lowerCAmelCase )
# Use a lower threshold if an image has any special care concept
lowercase :str = is_special_care * 0.01
lowercase :Union[str, Any] = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
lowercase :Any = jnp.round(_lowerCAmelCase , 3 )
lowercase :List[str] = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class __lowerCAmelCase ( lowerCAmelCase):
_a = CLIPConfig
_a = '''clip_input'''
_a = FlaxStableDiffusionSafetyCheckerModule
def __init__( self: Tuple , _lowerCAmelCase: CLIPConfig , _lowerCAmelCase: Optional[Tuple] = None , _lowerCAmelCase: int = 0 , _lowerCAmelCase: jnp.dtype = jnp.floataa , _lowerCAmelCase: bool = True , **_lowerCAmelCase: Tuple , ):
if input_shape is None:
lowercase :Union[str, Any] = (1, 2_24, 2_24, 3)
lowercase :Tuple = self.module_class(config=_lowerCAmelCase , dtype=_lowerCAmelCase , **_lowerCAmelCase )
super().__init__(_lowerCAmelCase , _lowerCAmelCase , input_shape=_lowerCAmelCase , seed=_lowerCAmelCase , dtype=_lowerCAmelCase , _do_init=_do_init )
def SCREAMING_SNAKE_CASE ( self: Optional[Any] , _lowerCAmelCase: jax.random.KeyArray , _lowerCAmelCase: Tuple , _lowerCAmelCase: FrozenDict = None ):
# init input tensor
lowercase :Tuple = jax.random.normal(_lowerCAmelCase , _lowerCAmelCase )
lowercase , lowercase :Union[str, Any] = jax.random.split(_lowerCAmelCase )
lowercase :Tuple = {"params": params_rng, "dropout": dropout_rng}
lowercase :List[str] = self.module.init(_lowerCAmelCase , _lowerCAmelCase )["params"]
return random_params
def __call__( self: Dict , _lowerCAmelCase: int , _lowerCAmelCase: dict = None , ):
lowercase :Optional[int] = jnp.transpose(_lowerCAmelCase , (0, 2, 3, 1) )
return self.module.apply(
{"params": params or self.params} , jnp.array(_lowerCAmelCase , dtype=jnp.floataa ) , rngs={} , )
| 236 |
import os
from math import logaa
def UpperCAmelCase__ ( lowerCamelCase = "base_exp.txt" ):
lowercase :float = 0
lowercase :str = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(lowerCamelCase ), lowerCamelCase ) ) ):
lowercase , lowercase :str = list(map(lowerCamelCase, line.split("," ) ) )
if x * logaa(lowerCamelCase ) > largest:
lowercase :Optional[Any] = x * logaa(lowerCamelCase )
lowercase :Any = i + 1
return result
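# Hedged note (not part of the original file): comparing a**b directly would
# need arbitrary-precision arithmetic on numbers with millions of digits.
# Because log10 is monotonic, a**b > c**d iff b * log10(a) > d * log10(c),
# so each "base,exponent" line is ranked by exponent * log10(base) in O(1).
# Small sanity check: 2**11 = 2048 < 3**7 = 2187, and indeed
# 11 * log10(2) ~= 3.31 < 7 * log10(3) ~= 3.34.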
if __name__ == "__main__":
print(solution())
| 236 | 1 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase):
@register_to_config
def __init__( self: Optional[int] , *,
_lowerCAmelCase: int = 4 , _lowerCAmelCase: int = 7_68 , _lowerCAmelCase: int , _lowerCAmelCase: List[Any] , ):
super().__init__()
lowercase :Optional[Any] = nn.Parameter(torch.zeros(_lowerCAmelCase ) )
# parameters for additional clip time embeddings
lowercase :Dict = nn.Linear(_lowerCAmelCase , _lowerCAmelCase )
lowercase :Optional[int] = nn.Linear(_lowerCAmelCase , _lowerCAmelCase )
# parameters for encoder hidden states
lowercase :Dict = clip_extra_context_tokens
lowercase :Dict = nn.Linear(
_lowerCAmelCase , self.clip_extra_context_tokens * cross_attention_dim )
lowercase :Any = nn.Linear(_lowerCAmelCase , _lowerCAmelCase )
lowercase :Dict = nn.LayerNorm(_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: Tuple , *, _lowerCAmelCase: Tuple , _lowerCAmelCase: Tuple , _lowerCAmelCase: int , _lowerCAmelCase: Tuple ):
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
lowercase :Union[str, Any] = image_embeddings.shape[0]
lowercase :str = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
lowercase :Optional[int] = classifier_free_guidance_embeddings.expand(
_lowerCAmelCase , -1 )
lowercase :List[str] = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
lowercase :List[str] = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
lowercase :Any = self.embedding_proj(_lowerCAmelCase )
lowercase :Union[str, Any] = self.clip_image_embeddings_project_to_time_embeddings(_lowerCAmelCase )
lowercase :Dict = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
lowercase :int = self.clip_extra_context_tokens_proj(_lowerCAmelCase )
lowercase :int = clip_extra_context_tokens.reshape(_lowerCAmelCase , -1 , self.clip_extra_context_tokens )
lowercase :Any = clip_extra_context_tokens.permute(0 , 2 , 1 )
lowercase :List[Any] = self.encoder_hidden_states_proj(_lowerCAmelCase )
lowercase :Optional[int] = self.text_encoder_hidden_states_norm(_lowerCAmelCase )
lowercase :str = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
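# Hedged shape sketch (not part of the original file). With batch size B,
# cross-attention dim D and the default of 4 extra context tokens, the
# forward pass above produces:
# * additive_clip_time_embeddings of shape (B, time_embed_dim), to be added
#   to the diffusion timestep embedding, and
# * 4 projected CLIP tokens of shape (B, 4, D), concatenated in front of the
#   text-encoder hidden states along the sequence axis.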
| 158 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def UpperCAmelCase__ ( lowerCamelCase ):
if is_torch_version("<", "2.0.0" ) or not hasattr(lowerCamelCase, "_dynamo" ):
return False
return isinstance(lowerCamelCase, torch._dynamo.eval_frame.OptimizedModule )
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase = True ):
lowercase :Optional[Any] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
lowercase :str = is_compiled_module(lowerCamelCase )
if is_compiled:
lowercase :str = model
lowercase :str = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(lowerCamelCase, lowerCamelCase ):
lowercase :Any = model.module
if not keep_fpaa_wrapper:
lowercase :List[Any] = getattr(lowerCamelCase, "forward" )
lowercase :Union[str, Any] = model.__dict__.pop("_original_forward", lowerCamelCase )
if original_forward is not None:
while hasattr(lowerCamelCase, "__wrapped__" ):
lowercase :Tuple = forward.__wrapped__
if forward == original_forward:
break
lowercase :Tuple = forward
if getattr(lowerCamelCase, "_converted_to_transformer_engine", lowerCamelCase ):
convert_model(lowerCamelCase, to_transformer_engine=lowerCamelCase )
if is_compiled:
lowercase :List[Any] = model
lowercase :Optional[int] = compiled_model
return model
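# Hedged note (not part of the original file): the function above peels
# wrapper layers off a model in a fixed order. It first detaches a
# torch.compile OptimizedModule (keeping a handle so the compiled wrapper
# can be re-attached at the end), then strips DistributedDataParallel /
# DataParallel / DeepSpeedEngine via repeated `.module` access, optionally
# restores the original (pre mixed-precision) forward, and finally undoes
# any transformer-engine conversion.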
def UpperCAmelCase__ ( ):
PartialState().wait_for_everyone()
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(lowerCamelCase, lowerCamelCase )
elif PartialState().local_process_index == 0:
torch.save(lowerCamelCase, lowerCamelCase )
@contextmanager
def UpperCAmelCase__ ( **lowerCamelCase ):
for key, value in kwargs.items():
lowercase :List[str] = str(lowerCamelCase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def UpperCAmelCase__ ( lowerCamelCase ):
if not hasattr(lowerCamelCase, "__qualname__" ) and not hasattr(lowerCamelCase, "__name__" ):
lowercase :Optional[int] = getattr(lowerCamelCase, "__class__", lowerCamelCase )
if hasattr(lowerCamelCase, "__qualname__" ):
return obj.__qualname__
if hasattr(lowerCamelCase, "__name__" ):
return obj.__name__
return str(lowerCamelCase )
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase ):
for key, value in source.items():
if isinstance(lowerCamelCase, lowerCamelCase ):
lowercase :Tuple = destination.setdefault(lowerCamelCase, {} )
merge_dicts(lowerCamelCase, lowerCamelCase )
else:
lowercase :Optional[Any] = value
return destination
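# Hedged usage sketch (not part of the original file): this is a recursive
# (deep) merge. Nested dicts are merged key by key, while leaf values from
# the source overwrite those in the destination, e.g.
# merge_dicts({"a": {"x": 1}}, {"a": {"y": 2}, "b": 3})
# returns {"a": {"y": 2, "x": 1}, "b": 3}.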
def UpperCAmelCase__ ( lowerCamelCase = None ):
if port is None:
lowercase :Tuple = 29500
with socket.socket(socket.AF_INET, socket.SOCK_STREAM ) as s:
return s.connect_ex(("localhost", port) ) == 0
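# Hedged note (not part of the original file): socket.connect_ex returns 0
# when the TCP connection succeeds, i.e. something is already listening on
# the port, and a non-zero errno otherwise, so `== 0` reads as "port is in
# use". The `with` block guarantees the probe socket is closed afterwards.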
| 158 | 1 |
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def UpperCAmelCase ( lowercase , lowercase , **lowercase ):
"""simple docstring"""
__lowercase = AutoConfig.from_pretrained(lowercase , **lowercase )
__lowercase = AutoModelForSeqaSeqLM.from_config(lowercase )
model.save_pretrained(lowercase )
AutoTokenizer.from_pretrained(lowercase ).save_pretrained(lowercase )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version) | 210 | from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__a : Optional[Any] = logging.get_logger(__name__)
__a : List[str] = TypeVar("""DatasetType""", Dataset, IterableDataset)
def UpperCAmelCase ( lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = "first_exhausted" , ):
"""simple docstring"""
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('''Unable to interleave an empty list of datasets.''' )
for i, dataset in enumerate(lowercase ):
if not isinstance(lowercase , (Dataset, IterableDataset) ):
if isinstance(lowercase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'''is an empty dataset dictionary.''' )
raise ValueError(
F"Dataset at position {i} has at least one split: {list(lowercase )}\n"
F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(lowercase ) )}']" )
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(lowercase ).__name__}." )
if i == 0:
__lowercase , __lowercase = (
(Dataset, IterableDataset) if isinstance(lowercase , lowercase ) else (IterableDataset, Dataset)
)
elif not isinstance(lowercase , lowercase ):
raise ValueError(
F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
lowercase , lowercase , lowercase , info=lowercase , split=lowercase , stopping_strategy=lowercase )
else:
return _interleave_iterable_datasets(
lowercase , lowercase , lowercase , info=lowercase , split=lowercase , stopping_strategy=lowercase )
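# Hedged usage sketch (not part of the original file), assuming the public
# name `interleave_datasets` for the function above. With no probabilities
# it alternates examples round-robin; "first_exhausted" stops once the
# smallest source runs out, while "all_exhausted" resamples until every
# source has been fully seen.
#
# d1 = Dataset.from_dict({"x": [0, 1, 2]})
# d2 = Dataset.from_dict({"x": [10, 11, 12]})
# interleave_datasets([d1, d2])["x"]  # -> [0, 10, 1, 11, 2, 12]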
def UpperCAmelCase ( lowercase , lowercase = None , lowercase = None , lowercase = 0 , ):
"""simple docstring"""
if not dsets:
raise ValueError('''Unable to concatenate an empty list of datasets.''' )
for i, dataset in enumerate(lowercase ):
if not isinstance(lowercase , (Dataset, IterableDataset) ):
if isinstance(lowercase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'''is an empty dataset dictionary.''' )
raise ValueError(
F"Dataset at position {i} has at least one split: {list(lowercase )}\n"
F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(lowercase ) )}']" )
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(lowercase ).__name__}." )
if i == 0:
__lowercase , __lowercase = (
(Dataset, IterableDataset) if isinstance(lowercase , lowercase ) else (IterableDataset, Dataset)
)
elif not isinstance(lowercase , lowercase ):
raise ValueError(
F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(lowercase , info=lowercase , split=lowercase , axis=lowercase )
else:
return _concatenate_iterable_datasets(lowercase , info=lowercase , split=lowercase , axis=lowercase ) | 210 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class __snake_case ( unittest.TestCase):
def __init__( self : str , __lowerCAmelCase : int , __lowerCAmelCase : List[str]=7 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : Optional[int]=3_0 , __lowerCAmelCase : Optional[int]=4_0_0 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Any=True , __lowerCAmelCase : str=1 / 2_5_5 , __lowerCAmelCase : str=True , __lowerCAmelCase : Union[str, Any]=[0.5, 0.5, 0.5] , __lowerCAmelCase : Optional[int]=[0.5, 0.5, 0.5] , __lowerCAmelCase : str=True , ):
"""simple docstring"""
_lowerCamelCase : int = size if size is not None else {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3}
_lowerCamelCase : Any = parent
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : Union[str, Any] = num_channels
_lowerCamelCase : List[Any] = min_resolution
_lowerCamelCase : List[str] = max_resolution
_lowerCamelCase : Union[str, Any] = do_resize
_lowerCamelCase : str = size
_lowerCamelCase : Any = do_rescale
_lowerCamelCase : int = rescale_factor
_lowerCamelCase : Dict = do_normalize
_lowerCamelCase : Optional[int] = image_mean
_lowerCamelCase : int = image_std
_lowerCamelCase : Optional[Any] = do_pad
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any]=False ):
"""simple docstring"""
if not batched:
_lowerCamelCase : Optional[int] = image_inputs[0]
if isinstance(_lowercase , Image.Image ):
_lowerCamelCase , _lowerCamelCase : int = image.size
else:
_lowerCamelCase , _lowerCamelCase : List[str] = image.shape[1], image.shape[2]
if w < h:
_lowerCamelCase : Optional[Any] = int(self.size['''shortest_edge'''] * h / w )
_lowerCamelCase : List[str] = self.size['''shortest_edge''']
elif w > h:
_lowerCamelCase : Optional[Any] = self.size['''shortest_edge''']
_lowerCamelCase : List[str] = int(self.size['''shortest_edge'''] * w / h )
else:
_lowerCamelCase : Any = self.size['''shortest_edge''']
_lowerCamelCase : List[Any] = self.size['''shortest_edge''']
else:
_lowerCamelCase : List[Any] = []
for image in image_inputs:
_lowerCamelCase , _lowerCamelCase : str = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_lowerCamelCase : str = max(_lowercase , key=lambda __lowerCAmelCase : item[0] )[0]
_lowerCamelCase : str = max(_lowercase , key=lambda __lowerCAmelCase : item[1] )[1]
return expected_height, expected_width
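# Hedged note (not part of the original file): the helper above mirrors
# DETR's shortest-edge resizing rule. The shorter image side is scaled to
# size["shortest_edge"] and the other side by the same factor, preserving
# aspect ratio; for a batch, the expected output takes the per-batch maximum
# height and width, matching the processor's padding behaviour.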
@require_torch
@require_vision
class __snake_case ( _lowerCAmelCase , unittest.TestCase):
snake_case__ : Optional[int] = DetrImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = DetrImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , '''image_mean''' ) )
self.assertTrue(hasattr(_lowercase , '''image_std''' ) )
self.assertTrue(hasattr(_lowercase , '''do_normalize''' ) )
self.assertTrue(hasattr(_lowercase , '''do_rescale''' ) )
self.assertTrue(hasattr(_lowercase , '''rescale_factor''' ) )
self.assertTrue(hasattr(_lowercase , '''do_resize''' ) )
self.assertTrue(hasattr(_lowercase , '''size''' ) )
self.assertTrue(hasattr(_lowercase , '''do_pad''' ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3} )
self.assertEqual(image_processor.do_pad , _lowercase )
_lowerCamelCase : Optional[int] = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=_lowercase )
self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2, '''longest_edge''': 8_4} )
self.assertEqual(image_processor.do_pad , _lowercase )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
_lowerCamelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase : str = self.image_processor_tester.get_expected_values(_lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase , _lowerCamelCase : Any = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase )
_lowerCamelCase : List[Any] = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
_lowerCamelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase : int = self.image_processor_tester.get_expected_values(_lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase : str = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase : List[str] = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
_lowerCamelCase : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase : Dict = self.image_processor_tester.get_expected_values(_lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase : Dict = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase : int = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
_lowerCamelCase : Any = json.loads(f.read() )
_lowerCamelCase : List[Any] = {'''image_id''': 3_9_7_6_9, '''annotations''': target}
# encode them
_lowerCamelCase : Dict = DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50''' )
_lowerCamelCase : Optional[int] = image_processing(images=_lowercase , annotations=_lowercase , return_tensors='''pt''' )
# verify pixel values
_lowerCamelCase : Dict = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , _lowercase )
_lowerCamelCase : str = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _lowercase , atol=1E-4 ) )
# verify area
_lowerCamelCase : str = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _lowercase ) )
# verify boxes
_lowerCamelCase : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _lowercase )
_lowerCamelCase : str = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _lowercase , atol=1E-3 ) )
# verify image_id
_lowerCamelCase : Union[str, Any] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _lowercase ) )
# verify is_crowd
_lowerCamelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _lowercase ) )
# verify class_labels
_lowerCamelCase : Optional[Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _lowercase ) )
# verify orig_size
_lowerCamelCase : Dict = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _lowercase ) )
# verify size
_lowerCamelCase : Tuple = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _lowercase ) )
@slow
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
_lowerCamelCase : Optional[Any] = json.loads(f.read() )
_lowerCamelCase : Optional[Any] = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9_7_6_9, '''segments_info''': target}
_lowerCamelCase : List[Any] = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
_lowerCamelCase : Optional[Any] = DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50-panoptic''' )
_lowerCamelCase : Dict = image_processing(images=_lowercase , annotations=_lowercase , masks_path=_lowercase , return_tensors='''pt''' )
# verify pixel values
_lowerCamelCase : List[str] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , _lowercase )
_lowerCamelCase : Optional[Any] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _lowercase , atol=1E-4 ) )
# verify area
_lowerCamelCase : Dict = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _lowercase ) )
# verify boxes
_lowerCamelCase : str = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _lowercase )
_lowerCamelCase : str = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _lowercase , atol=1E-3 ) )
# verify image_id
_lowerCamelCase : Union[str, Any] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _lowercase ) )
# verify is_crowd
_lowerCamelCase : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _lowercase ) )
# verify class_labels
_lowerCamelCase : Union[str, Any] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _lowercase ) )
# verify masks
_lowerCamelCase : List[Any] = 8_2_2_8_7_3
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _lowercase )
# verify orig_size
_lowerCamelCase : List[Any] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _lowercase ) )
# verify size
_lowerCamelCase : List[str] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _lowercase ) )
| 370 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/config.json''',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class __snake_case ( _lowercase):
snake_case__ : List[Any] = "xglm"
snake_case__ : Dict = ["past_key_values"]
snake_case__ : str = {
"num_attention_heads": "attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "num_layers",
}
def __init__( self : List[str] , __lowerCAmelCase : List[Any]=2_5_6_0_0_8 , __lowerCAmelCase : int=2_0_4_8 , __lowerCAmelCase : Dict=1_0_2_4 , __lowerCAmelCase : List[str]=4_0_9_6 , __lowerCAmelCase : Tuple=2_4 , __lowerCAmelCase : Dict=1_6 , __lowerCAmelCase : Tuple="gelu" , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Optional[Any]=0.0 , __lowerCAmelCase : List[Any]=0.0 , __lowerCAmelCase : int=0.02 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : str=2 , __lowerCAmelCase : Dict=1 , __lowerCAmelCase : Dict=0 , __lowerCAmelCase : List[Any]=2 , **__lowerCAmelCase : Optional[Any] , ):
"""simple docstring"""
_lowerCamelCase : List[Any] = vocab_size
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : int = d_model
_lowerCamelCase : Optional[Any] = ffn_dim
_lowerCamelCase : Any = num_layers
_lowerCamelCase : Union[str, Any] = attention_heads
_lowerCamelCase : List[str] = activation_function
_lowerCamelCase : Union[str, Any] = dropout
_lowerCamelCase : int = attention_dropout
_lowerCamelCase : Optional[int] = activation_dropout
_lowerCamelCase : Any = layerdrop
_lowerCamelCase : List[str] = init_std
_lowerCamelCase : Union[str, Any] = scale_embedding # scale factor will be sqrt(d_model) if True
_lowerCamelCase : str = use_cache
super().__init__(
pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , decoder_start_token_id=__lowerCAmelCase , **__lowerCAmelCase , )
| 175 | 0 |
import math
from collections.abc import Iterator
from itertools import takewhile
def a__ ( __UpperCamelCase ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__UpperCamelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
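# Hedged note (not part of the original file) on why probing 6k +/- 1 is
# enough: any integer is 6k + r with r in {0, 1, 2, 3, 4, 5}; residues 0, 2
# and 4 are even and residue 3 is divisible by 3, so after the explicit
# checks above every remaining candidate divisor is congruent to 1 or 5
# (mod 6), exactly the pairs (i, i + 2) tested in the loop.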
def a__ ( ):
SCREAMING_SNAKE_CASE_ = 2
while True:
if is_prime(__UpperCamelCase ):
yield num
num += 1
def a__ ( __UpperCamelCase = 2_0_0_0_0_0_0 ):
return sum(takewhile(lambda __UpperCamelCase : x < n , prime_generator() ) )
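# Hedged note (not part of the original file): with the default limit of
# 2_000_000 this is Project Euler problem 10, the sum of all primes below
# two million. takewhile() stops drawing from the infinite generator at the
# first prime >= n. Trial division costs roughly O(n * sqrt(n)); a sieve of
# Eratosthenes would reduce this to O(n log log n).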
if __name__ == "__main__":
print(f"{solution() = }")
| 118 | def a__ ( __UpperCamelCase ):
if n == 1 or not isinstance(__UpperCamelCase , __UpperCamelCase ):
return 0
elif n == 2:
return 1
else:
SCREAMING_SNAKE_CASE_ = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 2
while digits < n:
index += 1
SCREAMING_SNAKE_CASE_ = len(str(fibonacci(__UpperCamelCase ) ) )
return index
def a__ ( __UpperCamelCase = 1_0_0_0 ):
return fibonacci_digits_index(__UpperCamelCase )
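# Hedged alternative sketch (not part of the original file): the loop above
# recomputes fibonacci(index) from scratch each iteration, which is
# quadratic overall. Binet's formula F(i) ~= phi**i / sqrt(5) gives the
# digit count of F(i) directly, so the first index with n digits is
# ceil((n - 1 + log10(5) / 2) / log10(phi)).
import math
def first_fib_index_with_n_digits(n: int) -> int:
    phi = (1 + math.sqrt(5)) / 2
    return math.ceil((n - 1 + math.log10(5) / 2) / math.log10(phi))
# first_fib_index_with_n_digits(1_000) == 4782, matching the loop above.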
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 118 | 1 |
import math
import unittest
def __UpperCamelCase ( _lowerCAmelCase ) -> bool:
"""simple docstring"""
assert isinstance(__snake_case , __snake_case ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__snake_case ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def _lowerCAmelCase ( self ):
with self.assertRaises(lowerCamelCase__ ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ), """Zero doesn\'t have any positive factors, primes must have exactly two.""", )
self.assertFalse(
is_prime(1 ), """One only has 1 positive factor, primes must have exactly two.""", )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 356 |
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def __UpperCamelCase ( _lowerCAmelCase ) -> str:
"""simple docstring"""
A : int = []
for line in lines:
A : int = re.sub(R"""#.*""" , """""" , _lowerCAmelCase ) # remove comments
if line:
filtered_lines.append(_lowerCAmelCase )
A : Tuple = """\n""".join(_lowerCAmelCase )
# Make a hash from all this code
A : Union[str, Any] = full_str.encode("""utf-8""" )
return shaaaa(_lowerCAmelCase ).hexdigest()
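# Hedged note (not part of the original file): the hash is intentionally
# insensitive to comments and blank lines. `#...` text is stripped and empty
# lines dropped before hashing, so purely cosmetic edits to a packaged
# module do not invalidate dataset caches, while any change to executable
# source produces a new SHA-256 digest and therefore a new cache entry.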
# get importable module names and hash for caching
SCREAMING_SNAKE_CASE_:List[Any] = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data file extensions
SCREAMING_SNAKE_CASE_:Optional[Any] = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
SCREAMING_SNAKE_CASE_:Optional[int] = {"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
SCREAMING_SNAKE_CASE_:Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 115 | 0 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _lowerCAmelCase ( lowercase ,unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = BarthezTokenizer
__UpperCAmelCase : Optional[Any] = BarthezTokenizerFast
__UpperCAmelCase : str = True
__UpperCAmelCase : Optional[int] = True
def _lowercase ( self : Union[str, Any] ):
super().setUp()
__lowercase = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname, legacy_format=UpperCAmelCase__ )
__lowercase = tokenizer
def _lowercase ( self : int ):
__lowercase = "<pad>"
__lowercase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ), UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ), UpperCAmelCase__ )
def _lowercase ( self : str ):
__lowercase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], "<s>" )
self.assertEqual(vocab_keys[1], "<pad>" )
self.assertEqual(vocab_keys[-1], "<mask>" )
self.assertEqual(len(UpperCAmelCase__ ), 1_0_1_1_2_2 )
def _lowercase ( self : Optional[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size, 1_0_1_1_2_2 )
@require_torch
def _lowercase ( self : int ):
__lowercase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
__lowercase = [0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2]
__lowercase = self.tokenizer(
UpperCAmelCase__, max_length=len(UpperCAmelCase__ ), padding=UpperCAmelCase__, truncation=UpperCAmelCase__, return_tensors="pt" )
self.assertIsInstance(UpperCAmelCase__, UpperCAmelCase__ )
self.assertEqual((2, 6), batch.input_ids.shape )
self.assertEqual((2, 6), batch.attention_mask.shape )
__lowercase = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCAmelCase__, UpperCAmelCase__ )
def _lowercase ( self : Optional[Any] ):
if not self.test_rust_tokenizer:
return
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer()
__lowercase = "I was born in 92000, and this is falsé."
__lowercase = tokenizer.tokenize(UpperCAmelCase__ )
__lowercase = rust_tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = tokenizer.encode(UpperCAmelCase__, add_special_tokens=UpperCAmelCase__ )
__lowercase = rust_tokenizer.encode(UpperCAmelCase__, add_special_tokens=UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = self.get_rust_tokenizer()
__lowercase = tokenizer.encode(UpperCAmelCase__ )
__lowercase = rust_tokenizer.encode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__, UpperCAmelCase__ )
@slow
def _lowercase ( self : Union[str, Any] ):
# fmt: off
__lowercase = {"input_ids": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a French model, so we also use French texts.
__lowercase = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__, model_name="moussaKam/mbarthez", revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6", sequences=UpperCAmelCase__, )
| 17 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class _lowerCAmelCase :
"""simple docstring"""
@staticmethod
def _lowercase ( *UpperCAmelCase__ : Tuple, **UpperCAmelCase__ : List[Any] ):
pass
def _A ( UpperCamelCase_ : Union[str, Any]) -> Any:
'''simple docstring'''
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
_a = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Optional[Any] ):
__lowercase = pipeline(
"document-question-answering", model=UpperCAmelCase__, tokenizer=UpperCAmelCase__, image_processor=UpperCAmelCase__ )
__lowercase = INVOICE_URL
__lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) )
__lowercase = "What is the placebo?"
__lowercase = [
{
"image": load_image(UpperCAmelCase__ ),
"question": question,
},
{
"image": image,
"question": question,
},
{
"image": image,
"question": question,
"word_boxes": word_boxes,
},
]
return dqa_pipeline, examples
def _lowercase ( self : int, UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Any ):
__lowercase = dqa_pipeline(UpperCAmelCase__, top_k=2 )
self.assertEqual(
UpperCAmelCase__, [
[
{"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ ), "start": ANY(UpperCAmelCase__ ), "end": ANY(UpperCAmelCase__ )},
{"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ ), "start": ANY(UpperCAmelCase__ ), "end": ANY(UpperCAmelCase__ )},
]
]
* 3, )
@require_torch
@require_detectrona
@require_pytesseract
def _lowercase ( self : Dict ):
__lowercase = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2" )
__lowercase = INVOICE_URL
__lowercase = "How many cats are there?"
__lowercase = [
{"score": 0.0_001, "answer": "oy 2312/2019", "start": 3_8, "end": 3_9},
{"score": 0.0_001, "answer": "oy 2312/2019 DUE", "start": 3_8, "end": 4_0},
]
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase__, decimals=4 ), UpperCAmelCase__ )
__lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase__, decimals=4 ), UpperCAmelCase__ )
# No text is detected in this image, so layoutlmv2 should fail
# and probably return an empty answer.
__lowercase = "./tests/fixtures/tests_samples/COCO/000000039769.png"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(UpperCAmelCase__, [] )
# We can optionally pass the words and bounding boxes directly
__lowercase = "./tests/fixtures/tests_samples/COCO/000000039769.png"
__lowercase = []
__lowercase = []
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, words=UpperCAmelCase__, boxes=UpperCAmelCase__, top_k=2 )
self.assertEqual(UpperCAmelCase__, [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _lowercase ( self : List[str] ):
__lowercase = pipeline(
"document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
[
{"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6},
],
]
* 2, )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _lowercase ( self : Dict ):
__lowercase = pipeline(
"document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", max_seq_len=5_0, )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3},
{"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3},
{"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
[
{"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3},
{"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6},
]
]
* 2, )
@slow
@require_torch
@require_pytesseract
@require_vision
def _lowercase ( self : Optional[Any] ):
__lowercase = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=UpperCAmelCase__ )
__lowercase = pipeline(
"document-question-answering", model="impira/layoutlm-document-qa", tokenizer=UpperCAmelCase__, revision="3dc6de3", )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3},
], )
__lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3},
], )
__lowercase = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
[
{"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3},
]
]
* 2, )
__lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) )
# This model should also work if `image` is set to None
__lowercase = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3},
], )
@slow
@require_torch
@require_pytesseract
@require_vision
def _lowercase ( self : Union[str, Any] ):
__lowercase = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=UpperCAmelCase__ )
__lowercase = pipeline(
"document-question-answering", model="impira/layoutlm-document-qa", tokenizer=UpperCAmelCase__, revision="3dc6de3", max_seq_len=5_0, )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
[
{"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6},
]
]
* 2, )
__lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) )
# This model should also work if `image` is set to None
__lowercase = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
@slow
@require_torch
def _lowercase ( self : Dict ):
        dqa_pipeline = pipeline(
            "document-question-answering", model="naver-clova-ix/donut-base-finetuned-docvqa", tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ), feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa", )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2 )
        self.assertEqual(nested_simplify(outputs, decimals=4 ), [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
def _lowercase ( self : List[Any] ):
pass
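# Illustrative sketch (not part of the test file above): the tests exercise a
# `word_boxes` path where, as the inline comments note, `image` may be None and
# pre-computed OCR word/box pairs are used instead of running Tesseract. A minimal
# standalone use of that pattern might look like this; the checkpoint matches the
# tests, but the helper name is hypothetical.
from transformers import pipeline

def answer_from_word_boxes(word_boxes, question):
    dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
    # With `image=None`, the pipeline skips OCR and relies on the provided boxes.
    return dqa({"image": None, "word_boxes": word_boxes, "question": question}, top_k=1)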
| 17 | 1 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
lowerCamelCase : Dict = logging.get_logger(__name__)
class DonutFeatureExtractor( DonutImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ) -> None:
        """simple docstring"""
        warnings.warn(
            """The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use DonutImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 306 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 306 | 1 |
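# The module above repeatedly uses an optional-dependency guard: if a backend is
# missing, dummy placeholder objects are exported so the import succeeds and a
# helpful error is deferred to first use. A minimal sketch of such a dummy object
# follows; the class name and message are illustrative, not the actual diffusers
# implementation in `utils/dummy_pt_objects.py`.
class DummyScheduler:
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        # Raising here, instead of at import time, keeps `from .schedulers import *` safe.
        raise ImportError("DummyScheduler requires the PyTorch backend, which is not installed.")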
'''simple docstring'''
import socket
def main():
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 1_2_3_1_2
    sock.connect((host, port) )
    sock.send(b"""Hello server!""" )
    with open("""Received_file""" , """wb""" ) as out_file:
        print("""File opened""" )
        print("""Receiving data...""" )
        while True:
            data = sock.recv(1_0_2_4 )
            if not data:
                break
            out_file.write(data )
    print("""Successfully received the file""" )
    sock.close()
    print("""Connection closed""" )
if __name__ == "__main__":
    main()
| 163 |
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _dump_articles( path , articles ):
    content = """\n""".join(articles )
    Path(path ).open("""w""" ).writelines(content )
T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest( TestCasePlus ):
    def run_eval_tester( self , model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / """utest_input.source"""
        output_file_name = input_file_name.parent / """utest_output.txt"""
        assert not output_file_name.exists()
        articles = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""]
        _dump_articles(input_file_name , articles)
        score_path = str(Path(self.get_auto_remove_tmp_dir()) / """scores.json""")
        task = """translation_en_to_de""" if model == T5_TINY else """summarization"""
        testargs = f'''
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        '''.split()
        with patch.object(sys , """argv""" , testargs):
            run_generate()
        assert Path(output_file_name).exists()
        # os.remove(Path(output_file_name))
    def test_run_eval( self):
        self.run_eval_tester(T5_TINY)
@parameterized.expand([BART_TINY, MBART_TINY])
@slow
    def test_run_eval_slow( self , model):
        self.run_eval_tester(model)
@parameterized.expand([T5_TINY, MBART_TINY])
@slow
    def test_run_eval_search( self , model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / """utest_input.source"""
        output_file_name = input_file_name.parent / """utest_output.txt"""
        assert not output_file_name.exists()
        text = {
            """en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""],
            """de""": [
                """Maschinelles Lernen ist großartig, oder?""",
                """Ich esse gerne Bananen""",
                """Morgen ist wieder ein toller Tag!""",
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / """scores.json""")
        reference_path = str(tmp_dir / """val.target""")
        _dump_articles(input_file_name , text["""en"""])
        _dump_articles(reference_path , text["""de"""])
        task = """translation_en_to_de""" if model == T5_TINY else """summarization"""
        testargs = f'''
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        '''.split()
        testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""])
        with patch.object(sys , """argv""" , testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [""" num_beams | length_penalty""", model, """Best score args"""]
            un_expected_strings = ["""Info"""]
            if "translation" in task:
                expected_strings.append("""bleu""")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
| 163 | 1 |
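# The tests above drive argparse-based scripts by patching `sys.argv`. A standalone
# sketch of that pattern (the entry point and argument values here are placeholders):
import sys
from unittest.mock import patch

def run_with_args(entry_point, argv):
    # entry_point is any main() that reads sys.argv via argparse
    with patch.object(sys, "argv", argv):
        return entry_point()

# run_with_args(run_generate, ["run_eval.py", "sshleifer/bart-tiny-random", "in.txt", "out.txt"])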
'''simple docstring'''
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
def _lowerCamelCase ( self ) -> str:
super().setUp()
vocab_tokens = [
'<unk>',
'<cls>',
'<sep>',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file, 'w', encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def _lowerCamelCase ( self, **lowercase_ ) -> Tuple:
return FunnelTokenizer.from_pretrained(self.tmpdirname, **lowercase_ )
def _lowerCamelCase ( self, **lowercase_ ) -> Optional[int]:
return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **lowercase_ )
def _lowerCamelCase ( self, lowercase_ ) -> Any:
input_text = 'UNwant\u00E9d,running'
output_text = 'unwanted, running'
return input_text, output_text
def _lowerCamelCase ( self ) -> Dict:
tokenizer = self.tokenizer_class(self.vocab_file )
tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ), [7, 4, 5, 10, 8, 9] )
def _lowerCamelCase ( self ) -> Any:
tokenizers = self.get_tokenizers(do_lower_case=False )
for tokenizer in tokenizers:
inputs = tokenizer('UNwant\u00E9d,running' )
sentence_len = len(inputs['input_ids'] ) - 1
self.assertListEqual(inputs['token_type_ids'], [2] + [0] * sentence_len )
inputs = tokenizer('UNwant\u00E9d,running', 'UNwant\u00E9d,running' )
self.assertListEqual(inputs['token_type_ids'], [2] + [0] * sentence_len + [1] * sentence_len )
| 365 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowerCAmelCase_ = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def test_inspect_dataset( path , tmp_path ):
    inspect_dataset(path , tmp_path )
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def test_inspect_metric( path , tmp_path ):
    inspect_metric(path , tmp_path )
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.parametrize(
    'path, config_name, expected_splits' , [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ] , )
def test_get_dataset_config_info( path , config_name , expected_splits ):
    info = get_dataset_config_info(path , config_name=config_name )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
    'path, config_name, expected_exception' , [
        ('paws', None, ValueError),
    ] , )
def test_get_dataset_config_info_error( path , config_name , expected_exception ):
    with pytest.raises(expected_exception ):
        get_dataset_config_info(path , config_name=config_name )
@pytest.mark.parametrize(
    'path, expected' , [
        ('squad', 'plain_text'),
        ('acronym_identification', 'default'),
        ('lhoestq/squad', 'plain_text'),
        ('lhoestq/test', 'default'),
        ('lhoestq/demo1', 'lhoestq--demo1'),
        ('dalle-mini/wit', 'dalle-mini--wit'),
    ] , )
def test_get_dataset_config_names( path , expected ):
    config_names = get_dataset_config_names(path )
    assert expected in config_names
@pytest.mark.parametrize(
    'path, expected_configs, expected_splits_in_first_config' , [
        ('squad', ['plain_text'], ['train', 'validation']),
        ('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
        ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
    ] , )
def test_get_dataset_info( path , expected_configs , expected_splits_in_first_config ):
    infos = get_dataset_infos(path )
    assert list(infos.keys() ) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
    'path, expected_config, expected_splits' , [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ] , )
def test_get_dataset_infos( path , expected_config , expected_splits ):
    infos = get_dataset_infos(path )
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
    'path, config_name, expected_exception' , [
        ('paws', None, ValueError),
    ] , )
def test_get_dataset_split_names_error( path , config_name , expected_exception ):
    with pytest.raises(expected_exception ):
        get_dataset_split_names(path , config_name=config_name )
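# A small usage sketch of the inspection helpers tested above (hedged: network access
# to the Hugging Face Hub is assumed; the helper name is illustrative):
from datasets import get_dataset_config_names, get_dataset_split_names

def describe_dataset(path):
    for config in get_dataset_config_names(path):
        print(config, get_dataset_split_names(path, config_name=config))

# describe_dataset("squad")  # plain_text ['train', 'validation'], per the cases above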
| 332 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool( PipelineTool ):
    default_checkpoint = 'dandelin/vilt-b32-finetuned-vqa'
    description = (
        'This is a tool that answers a question about an image. It takes an input named `image` which should be the '
        'image containing the information, as well as a `question` which should be the question in English. It '
        'returns a text that is the answer to the question.'
    )
    name = 'image_qa'
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering
    inputs = ['image', 'text']
    outputs = ['text']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ["vision"] )
        super().__init__(*args , **kwargs )
    def encode( self , image: "Image" , question: str ):
        return self.pre_processor(image , question , return_tensors="pt" )
    def forward( self , inputs ):
        with torch.no_grad():
            return self.model(**inputs ).logits
    def decode( self , outputs ):
        idx = outputs.argmax(-1 ).item()
        return self.model.config.id2label[idx]
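# Hedged usage sketch (not part of the original module): PipelineTool instances are
# callable and download the checkpoint lazily on first use. The image path below is
# a placeholder.
if __name__ == "__main__":
    from PIL import Image
    tool = ImageQuestionAnsweringTool()
    print(tool(image=Image.open("document.png"), question="What is shown?"))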
| 19 |
"""simple docstring"""
import math
def malus_law( initial_intensity , angle ):
    if initial_intensity < 0:
        raise ValueError('''The value of intensity cannot be negative''' )
    # handling of negative values of initial intensity
    if angle < 0 or angle > 3_60:
        raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''' )
    # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
if __name__ == "__main__":
    import doctest
    doctest.testmod(name='malus_law')
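    # Worked examples, following directly from I = I0 * cos^2(theta):
    assert malus_law(100, 0) == 100.0               # parallel polarizers transmit everything
    assert abs(malus_law(100, 45) - 50.0) < 1e-9    # cos^2(45 degrees) = 1/2
    assert abs(malus_law(100, 60) - 25.0) < 1e-9    # cos(60 degrees) = 0.5, squared is 0.25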
| 249 | 0 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class WhisperFeatureExtractor( SequenceFeatureExtractor ):
    model_input_names = ["""input_features"""]
    def __init__(self , feature_size=80 , sampling_rate=1_60_00 , hop_length=1_60 , chunk_length=30 , n_fft=4_00 , padding_value=0.0 , return_attention_mask=False , **kwargs , ) -> None:
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=80_00.0 , sampling_rate=sampling_rate , norm='slaney' , mel_scale='slaney' , )
    def _np_extract_fbank_features(self , waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , 'hann') , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='log10' , )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec , log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values: List[np.ndarray] , attention_mask: List[np.ndarray] , padding_value: float = 0.0) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask , np.int32)
            normed_input_values = []
            for vector, length in zip(input_values , attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1E-7) for x in input_values]
        return normed_input_values
    def __call__(self , raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , truncation: bool = True , pad_to_multiple_of: Optional[int] = None , return_tensors: Optional[Union[str, TensorType]] = None , return_attention_mask: Optional[bool] = None , padding: Optional[str] = "max_length" , max_length: Optional[int] = None , sampling_rate: Optional[int] = None , do_normalize: Optional[bool] = None , **kwargs , ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.')
        is_batched_numpy = isinstance(raw_speech , np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray):
            raw_speech = np.asarray(raw_speech , dtype=np.float32)
        elif isinstance(raw_speech , np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        batched_speech = BatchFeature({'input_features': raw_speech})
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech , padding=padding , max_length=max_length if max_length else self.n_samples , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask or do_normalize , )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs['input_features'] = self.zero_mean_unit_var_norm(
                padded_inputs['input_features'] , attention_mask=padded_inputs['attention_mask'] , padding_value=self.padding_value , )
            padded_inputs['input_features'] = np.stack(padded_inputs['input_features'] , axis=0)
        # make sure list is in array format
        input_features = padded_inputs.get('input_features').transpose(2 , 0 , 1)
        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]
        if isinstance(input_features[0] , List):
            padded_inputs['input_features'] = [np.asarray(feature , dtype=np.float32) for feature in input_features]
        else:
            padded_inputs['input_features'] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs['attention_mask'] = padded_inputs['attention_mask'][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output['feature_extractor_type'] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
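# Hedged usage sketch (not part of the original module): one 30-second mono clip at
# 16 kHz maps to log-mel features of shape (1, 80, 3000) with the defaults above.
if __name__ == "__main__":
    extractor = WhisperFeatureExtractor()
    speech = np.zeros(16_000 * 30, dtype=np.float32)
    features = extractor(speech, sampling_rate=16_000, return_tensors="np")
    print(features["input_features"].shape)  # expected: (1, 80, 3000)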
| 356 | """simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_model_names = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f'''funnel-transformer/{name}''': 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f'''funnel-transformer/{name}''': {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2
    def __init__(self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , bos_token="<s>" , eos_token="</s>" , clean_text=True , tokenize_chinese_chars=True , strip_accents=None , wordpieces_prefix="##" , **kwargs , ) -> None:
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , bos_token=bos_token , eos_token=eos_token , clean_text=clean_text , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , wordpieces_prefix=wordpieces_prefix , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase' , do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None) -> Any:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files )
| 95 | 0 |
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
def get_module_path( test_file ):
    """simple docstring"""
    components = test_file.split(os.path.sep )
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            '''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '''
            f"""{test_file} instead.""" )
    test_fn = components[-1]
    if not test_fn.endswith('''py''' ):
        raise ValueError(f"""`test_file` should be a python file. Got {test_fn} instead.""" )
    if not test_fn.startswith('''test_modeling_''' ):
        raise ValueError(
            f"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" )
    components = components[:-1] + [test_fn.replace('''.py''' , '''''' )]
    test_module_path = """.""".join(components )
    return test_module_path
def get_test_module( test_file ):
    """simple docstring"""
    test_module_path = get_module_path(test_file )
    test_module = importlib.import_module(test_module_path )
    return test_module
def get_tester_classes( test_file ):
    """simple docstring"""
    tester_classes = []
    test_module = get_test_module(test_file )
    for attr in dir(test_module ):
        if attr.endswith('''ModelTester''' ):
            tester_classes.append(getattr(test_module , attr ) )
    # sort with class names
    return sorted(tester_classes , key=lambda x : x.__name__ )
def get_test_classes( test_file ):
    """simple docstring"""
    test_classes = []
    test_module = get_test_module(test_file )
    for attr in dir(test_module ):
        test_class = getattr(test_module , attr )
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class , '''all_model_classes''' , [] )
        if len(model_classes ) > 0:
            test_classes.append(test_class )
    # sort with class names
    return sorted(test_classes , key=lambda x : x.__name__ )
def get_model_classes( test_file ):
    """simple docstring"""
    test_classes = get_test_classes(test_file )
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes )
    # sort with class names
    return sorted(model_classes , key=lambda x : x.__name__ )
def get_model_tester_from_test_class( test_class ):
    """simple docstring"""
    test = test_class()
    if hasattr(test , '''setUp''' ):
        test.setUp()
    model_tester = None
    if hasattr(test , '''model_tester''' ):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model( test_file , model_class ):
    """simple docstring"""
    test_classes = get_test_classes(test_file )
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class )
    # sort with class names
    return sorted(target_test_classes , key=lambda x : x.__name__ )
def get_tester_classes_for_model( test_file , model_class ):
    """simple docstring"""
    test_classes = get_test_classes_for_model(test_file , model_class )
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class )
        if tester_class is not None:
            tester_classes.append(tester_class )
    # sort with class names
    return sorted(tester_classes , key=lambda x : x.__name__ )
def get_test_to_tester_mapping( test_file ):
    """simple docstring"""
    test_classes = get_test_classes(test_file )
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class ) for test_class in test_classes}
    return test_tester_mapping
def get_model_to_test_mapping( test_file ):
    """simple docstring"""
    model_classes = get_model_classes(test_file )
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_test_mapping
def get_model_to_tester_mapping( test_file ):
    """simple docstring"""
    model_classes = get_model_classes(test_file )
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json( o ):
    """simple docstring"""
    if isinstance(o , str ):
        return o
    elif isinstance(o , type ):
        return o.__name__
    elif isinstance(o , (list, tuple) ):
        return [to_json(x ) for x in o]
    elif isinstance(o , dict ):
        return {to_json(k ): to_json(v ) for k, v in o.items()}
    else:
        return o
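# Hedged usage sketch: run from the root of a transformers checkout; the test file
# path below is an example and must exist for the dynamic import to succeed.
if __name__ == "__main__":
    mapping = get_model_to_tester_mapping("tests/models/bert/test_modeling_bert.py")
    print(to_json(mapping))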
| 167 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator( length: int = 8 ):
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars ) for _ in range(length ) )
def alternative_password_generator( chars_incl: str , i: int ):
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl )
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    password = (
        chars_incl
        + random(ascii_letters , quotient + remainder )
        + random(digits , quotient )
        + random(punctuation , quotient )
    )
    list_of_chars = list(password )
    shuffle(list_of_chars )
    return "".join(list_of_chars )
# random is a generalised function for letters, characters and numbers
def random( chars_incl: str , quantity: int ):
    return "".join(secrets.choice(chars_incl ) for _ in range(quantity ) )
def random_number( chars_incl , quantity ):
    pass  # Put your code here...
def random_letters( chars_incl , quantity ):
    pass  # Put your code here...
def random_characters( chars_incl , quantity ):
    pass  # Put your code here...
def is_strong_password( password: str , min_length: int = 8 ):
    if len(password ) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password )
    lower = any(char in ascii_lowercase for char in password )
    num = any(char in digits for char in password )
    spec_char = any(char in punctuation for char in password )
    return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
def main():
    max_length = int(input("""Please indicate the max length of your password: """ ).strip() )
    chars_incl = input(
        """Please indicate the characters that must be in your password: """ ).strip()
    print("""Password generated:""" , password_generator(max_length ) )
    print(
        """Alternative Password generated:""" , alternative_password_generator(chars_incl , max_length ) , )
    print("""[If you are thinking of using this password, You better save it.]""" )
if __name__ == "__main__":
    main()
| 233 | 0 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 354 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling( data: dict ):
    """simple docstring"""
    return (data["data"], data["target"])
def xgboost( features: np.ndarray , target: np.ndarray ):
    """simple docstring"""
    classifier = XGBClassifier()
    classifier.fit(features , target )
    return classifier
def main():
    """simple docstring"""
    iris = load_iris()
    features , targets = data_handling(iris )
    x_train , x_test , y_train , y_test = train_test_split(
        features , targets , test_size=0.25 )
    names = iris['''target_names''']
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train , y_train )
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier , x_test , y_test , display_labels=names , cmap='''Blues''' , normalize='''true''' , )
    plt.title('''Normalized Confusion Matrix - IRIS Dataset''' )
    plt.show()
if __name__ == "__main__":
    import doctest
    doctest.testmod(verbose=True)
    main()
| 281 | 0 |
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax( nn.Module ):
    def __init__( self , n_token , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False ):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs ) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters ) )
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_embed ) ) )
                else:
                    self.out_projs.append(None )
                self.out_layers.append(nn.Linear(d_embed , n_token ) )
        else:
            for i in range(len(self.cutoffs ) ):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_emb_i ) ) )
                self.out_layers.append(nn.Linear(d_emb_i , r_idx - l_idx ) )
        self.keep_order = keep_order
    def _compute_logit( self , hidden , weight , bias , proj ):
        if proj is None:
            logit = nn.functional.linear(hidden , weight , bias=bias )
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden , proj.t().contiguous() )
            logit = nn.functional.linear(proj_hid , weight , bias=bias )
            # else:
            # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            # if bias is not None:
            # logit = logit + bias
        return logit
    def forward( self , hidden , labels=None , keep_order=False ):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1 , hidden.size(-1 ) )
            labels = labels.view(-1 )
            if hidden.size(0 ) != labels.size(0 ):
                raise RuntimeError("""Input and labels should have the same size in the batch dimension.""" )
        else:
            hidden = hidden.view(-1 , hidden.size(-1 ) )
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            if labels is not None:
                mask = labels != -1_00
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device )
                out[mask] = (
                    -nn.functional.log_softmax(logit , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
                )
            else:
                out = nn.functional.log_softmax(logit , dim=-1 )
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(weight_i )
                biases.append(bias_i )
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj )
            head_logprob = nn.functional.log_softmax(head_logit , dim=1 )
            if labels is None:
                out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            else:
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device )
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0 , indices_i ) - l_idx
                    head_logprob_i = head_logprob.index_select(0 , indices_i )
                    hidden_i = hidden.index_select(0 , indices_i )
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i , weight_i , bias_i , proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1 )
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1 , target_i[:, None] ).squeeze(1 )
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self , """keep_order""" ) and self.keep_order) or keep_order:
                        out.index_copy_(0 , indices_i , -logprob_i )
                    else:
                        out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
                    offset += logprob_i.size(0 )
            return out
    def log_prob( self , hidden ):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            return nn.functional.log_softmax(logit , dim=-1 )
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(weight_i )
                biases.append(bias_i )
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj )
            out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            head_logprob = nn.functional.log_softmax(head_logit , dim=1 )
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden , weight_i , bias_i , proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1 )
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i
            return out
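# Hedged usage sketch (shapes only; not part of the original module): with cutoffs
# [2000, 10000] and a 50k vocabulary, forward() returns one negative log-likelihood
# per predicted position after the internal one-token shift.
if __name__ == "__main__":
    crit = ProjectedAdaptiveLogSoftmax(n_token=50_000 , d_embed=128 , d_proj=128 , cutoffs=[2_000, 10_000] , div_val=2 )
    hidden = torch.randn(4 , 12 , 128 )   # (batch, seq_len, d_proj)
    labels = torch.randint(0 , 50_000 , (4, 12) )
    nll = crit(hidden , labels )
    print(nll.shape )  # one loss term per predicted position: (4 * 11,)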
| 12 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowercase__ = logging.get_logger(__name__)
class PerceiverFeatureExtractor( PerceiverImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PerceiverImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 151 | 0 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def UpperCamelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self ):
_snake_case : Any = "<pad>"
_snake_case : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )
def UpperCamelCase ( self ):
vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(vocab_keys ) , 1_002 )
def UpperCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_002 )
def UpperCamelCase ( self ):
tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokens = tokenizer.tokenize("This is a test" )
self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
ids = tokenizer.convert_tokens_to_ids(tokens )
self.assertListEqual(
ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
back_tokens = tokenizer.convert_ids_to_tokens(ids )
self.assertListEqual(
back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    def test_save_pretrained( self ):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
@cached_property
    def big_tokenizer( self ):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base" )
def UpperCamelCase ( self ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(SAMPLE_VOCAB , f.name )
tokenizer = XLMRobertaTokenizer(f.name , keep_accents=True )
pickled_tokenizer = pickle.dumps(tokenizer )
pickle.loads(pickled_tokenizer )
def UpperCamelCase ( self ):
if not self.test_rust_tokenizer:
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
sequence = "I was born in 92000, and this is falsé."
tokens = tokenizer.tokenize(sequence )
rust_tokens = rust_tokenizer.tokenize(sequence )
self.assertListEqual(tokens , rust_tokens )
ids = tokenizer.encode(sequence , add_special_tokens=False )
rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
self.assertListEqual(ids , rust_ids )
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence )
rust_ids = rust_tokenizer.encode(sequence )
self.assertListEqual(ids , rust_ids )
@slow
def UpperCamelCase ( self ):
_snake_case : List[Any] = "Hello World!"
_snake_case : Union[str, Any] = [0, 35_378, 6_661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__lowercase , self.big_tokenizer.encode(__lowercase ) )
@slow
def UpperCamelCase ( self ):
symbols = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
original_tokenizer_encodings = [
0,
3_293,
83,
10,
4_552,
4_989,
7_986,
678,
10,
5_915,
111,
179_459,
124_850,
4,
6_044,
237,
12,
6,
5,
6,
4,
6_780,
705,
15,
1_388,
44,
378,
10_114,
711,
152,
20,
6,
5,
22_376,
642,
1_221,
15_190,
34_153,
450,
5_608,
959,
1_119,
57_702,
136,
186,
47,
1_098,
29_367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_044,
237,
6_284,
50_901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def UpperCamelCase ( self ):
# fmt: off
_snake_case : Union[str, Any] = {"input_ids": [[0, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [0, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowercase , model_name="xlm-roberta-base" , revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3" , )
| 359 |
from __future__ import annotations
def snake_case (__lowercase , __lowercase ) -> float:
'''simple docstring'''
_snake_case : Any = sorted(numsa + numsa )
_snake_case ,_snake_case : Any = divmod(len(__lowercase ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
__SCREAMING_SNAKE_CASE : Union[str, Any] = [float(x) for x in input('Enter the elements of first array: ').split()]
__SCREAMING_SNAKE_CASE : List[Any] = [float(x) for x in input('Enter the elements of second array: ').split()]
print(F'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''') | 284 | 0 |
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()
        self.learnable = learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None
        self.embeddings = torch.nn.Parameter(embeddings)
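# Illustration (an added assumption for clarity, not part of the original pipeline):
# with learnable=False the parameter wraps no data and the pipeline later falls back
# to encoding "" as the unconditional prompt, while with learnable=True a trained
# (length, hidden_size) embedding table is reused directly, e.g.:
#
#   embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=True, hidden_size=512, length=77)
#   assert embeddings.embeddings.shape == (77, 512)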
class VQDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using VQ-Diffusion."""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(self, vqvae: VQModel, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, transformer: Transformer2DModel, scheduler: VQDiffusionScheduler, learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings):
        super().__init__()
        self.register_modules(
            vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]
        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)
        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size
                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
        return prompt_embeds
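    # Layout note (added comment, not in the original source): when classifier-free
    # guidance is active, the returned batch is ordered [negative, positive], so a
    # prompt batch of size B with num_images_per_prompt=N yields 2*B*N rows; the
    # guidance step below splits it back apart with `model_output.chunk(2)`.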
    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], num_inference_steps: int = 100, guidance_scale: float = 5.0, truncation_rate: float = 1.0, num_images_per_prompt: int = 1, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps.to(self.device)
        sample = latents
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample
            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample
            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)
            model_output = self.truncate(model_output, truncation_rate)
            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)
            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)
        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """Zero out (set to `-inf` in log space) the lowest probabilities that would push the
        cumulative kept probability per latent pixel above `truncation_rate`."""
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]
        keep_mask = keep_mask.gather(1, indices.argsort(1))
        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)
        return rv
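# A minimal standalone sketch of the truncation rule above (illustrative only, not
# part of the original pipeline): keep the smallest set of classes whose cumulative
# probability reaches the truncation rate, dropping the rest in log space.
if __name__ == "__main__":
    toy_log_p = torch.log(torch.tensor([[[0.5], [0.3], [0.2]]]))  # (batch, classes, pixels)
    sorted_log_p, indices = torch.sort(toy_log_p, 1, descending=True)
    keep = torch.exp(sorted_log_p).cumsum(dim=1) < 0.75
    keep = torch.cat((torch.full_like(keep[:, 0:1, :], True), keep), dim=1)[:, :-1, :]
    keep = keep.gather(1, indices.argsort(1))
    truncated = toy_log_p.clone()
    truncated[~keep] = -torch.inf
    print(truncated.exp())  # ~[[0.5], [0.3], [0.0]]: the 0.2 class is dropped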
| 158 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
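# Shape note (added for clarity): the helper returns entries shaped like
# ("<partition id>_<row index>", {"id": <value>}), e.g. ("1_0", {"id": 5}) for the
# first row of partition 1 -- exactly the (key, example) pairs the iterables in
# the tests below are expected to yield.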
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)
    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 158 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class PoolFormerImageProcessor(BaseImageProcessor):
    r"""Constructs a PoolFormer image processor."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, crop_pct: float = 0.9, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, rescale_factor: Union[int, float] = 1 / 255, do_rescale: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], crop_pct: Optional[float] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))
            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
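    # Worked example (added comment): with the default size {"shortest_edge": 224}
    # and crop_pct=0.9, the image is first resized so its short side is
    # int(224 / 0.9) = 248 pixels, and the later 224x224 center crop then keeps
    # roughly crop_pct of the resized extent -- the standard "resize then crop"
    # evaluation recipe.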
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
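# A minimal usage sketch (illustrative only; guarded so the module stays import-safe):
if __name__ == "__main__":
    image_processor = PoolFormerImageProcessor()
    dummy_image = np.random.randint(0, 256, size=(3, 300, 400), dtype=np.uint8)
    batch = image_processor(dummy_image, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected (1, 3, 224, 224)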
| 364 |
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares that sum to `number` (Lagrange-style DP).

    >>> minimum_squares_to_represent_a_number(25)
    1
    >>> minimum_squares_to_represent_a_number(37)
    2
    """
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
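# Worked examples (added for illustration): 12 -> 3 (4 + 4 + 4), 13 -> 2 (9 + 4),
# and, by this implementation's convention, 0 -> 1. Lagrange's four-square theorem
# guarantees the answer never exceeds 4.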
if __name__ == "__main__":
import doctest
doctest.testmod()
| 180 | 0 |
'''simple docstring'''
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
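# Worked example (added for illustration, hypothetical data): with key_func set to
# Things.get_value, items worth (10, 5) and weighing (5, 3) under max_cost=6 would
# greedily take the value-10 item first (weight 5) and then skip the second, since
# 5 + 3 > 6 -- greedy by a single key is fast but not guaranteed optimal.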
def test_greedy():
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 83 | import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = 'std_conv' if 'bit' in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer, num_labels=1000, id2label=id2label, label2id=label2id,
    )
    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace('stem.conv', 'bit.embedder.convolution')
    if "blocks" in name:
        name = name.replace('blocks', 'layers')
    if "head.fc" in name:
        name = name.replace('head.fc', 'classifier.1')
    if name.startswith('norm'):
        name = 'bit.' + name
    if "bit" not in name and "classifier" not in name:
        name = 'bit.encoder.' + name
    return name
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)
    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if 'head' in key else val
    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True, size={'shortest_edge': timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(),
    )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors='pt').pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
    print('Logits:', logits[0, :3])
    print('Predicted class:', model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(F"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(F"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(F"ybelkada/{model_name}")
        processor.push_to_hub(F"ybelkada/{model_name}")
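# Example invocation (illustrative; the script file name is assumed from the usual
# transformers conversion-script layout):
#
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-50 --push_to_hub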
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 175 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for the Dinat model."""

    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]], mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-5, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [F'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
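# Sanity check (added for illustration): with the defaults above, the reported
# hidden_size is embed_dim * 2 ** (len(depths) - 1) = 64 * 2 ** 3 = 512, i.e. the
# channel width after the last of the four stages.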
| 225 |
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)
    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(F'Save PyTorch model to {pytorch_weights_dump_path}')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(F'Save configuration file to {pytorch_config_dump_path}')
    with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
        f.write(config.to_json_string())
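# Example invocation (illustrative only; the script file name is assumed):
#
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path ./models/117M/model.ckpt \
#       --pytorch_dump_folder_path ./gpt2-pytorch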
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 225 | 1 |
'''simple docstring'''
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """The heuristic here is the Manhattan Distance from the current position to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of successors (both in the grid and on free spaces)."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent,
                )
            )
        return successors
    def retrace_path(self, node: Node | None) -> Path:
        """Retrace the path from parent to parent until reaching the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
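# Note (added comment): unlike A*, greedy best-first ranks open nodes purely by the
# heuristic f_cost = |dx| + |dy| (g_cost is tracked but never added in), e.g. a node
# at (2, 3) with the goal at (6, 6) scores |6 - 2| + |6 - 3| = 7.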
if __name__ == "__main__":
lowercase : Tuple = (0, 0)
lowercase : Union[str, Any] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('------')
lowercase : List[str] = GreedyBestFirst(init, goal)
lowercase : Any = greedy_bf.search()
if path:
for pos_x, pos_y in path:
lowercase : Optional[Any] = 2
for elem in grid:
print(elem)
| 3 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    """Placeholder object raising a helpful error when `note_seq` is not installed."""

    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 115 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
'''MBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MBartForCausalLM''',
'''MBartForConditionalGeneration''',
'''MBartForQuestionAnswering''',
'''MBartForSequenceClassification''',
'''MBartModel''',
'''MBartPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
'''TFMBartForConditionalGeneration''',
'''TFMBartModel''',
'''TFMBartPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
'''FlaxMBartForConditionalGeneration''',
'''FlaxMBartForQuestionAnswering''',
'''FlaxMBartForSequenceClassification''',
'''FlaxMBartModel''',
'''FlaxMBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
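# Usage note (added for illustration): with this lazy structure,
# `import transformers.models.mbart` stays cheap; the heavy torch/tf/flax submodules
# listed in `_import_structure` are only materialized when an attribute such as
# `MBartForConditionalGeneration` is first accessed on the module.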
| 323 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 323 | 1 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)
class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file", default=None, help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args) -> None:
        self._accelerate_config_file = accelerate_config_file
    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 306 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=33, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range)
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True
    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_create_position_ids_respects_padding_index(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
@unittest.skip("""Esm does not support embedding resizing""" )
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 306 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, word_embed_proj_dim=16):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase_ : Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase_ : Union[str, Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase_ : List[Any] = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=_snake_case , **self.config_updates , )
UpperCAmelCase_ : Any = prepare_opt_inputs_dict(_snake_case , _snake_case )
return config, inputs_dict
def UpperCamelCase__ ( self , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = TFOPTModel(config=_snake_case )
UpperCAmelCase_ : Union[str, Any] = inputs_dict["input_ids"]
UpperCAmelCase_ : Dict = input_ids[:1, :]
UpperCAmelCase_ : int = inputs_dict["attention_mask"][:1, :]
UpperCAmelCase_ : Optional[Any] = 1
# first forward pass
UpperCAmelCase_ : Tuple = model(_snake_case , attention_mask=_snake_case , use_cache=_snake_case )
UpperCAmelCase_ , UpperCAmelCase_ : Any = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase_ : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase_ : Dict = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
UpperCAmelCase_ : List[str] = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCAmelCase_ : str = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCAmelCase_ : str = model(_snake_case , attention_mask=_snake_case )[0]
UpperCAmelCase_ : Dict = model(_snake_case , attention_mask=_snake_case , past_key_values=_snake_case )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCAmelCase_ : Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCAmelCase_ : Any = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase_ : List[str] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_snake_case , _snake_case , rtol=1E-3 )
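# Hedged sketch of the property the check above asserts: a full forward pass and an
# incremental pass that reuses `past_key_values` should agree (up to numerical tolerance)
# on the logits for the new token. The checkpoint name is illustrative.
def _example_cached_decoding_matches_full_pass():
    model = TFOPTForCausalLM.from_pretrained("facebook/opt-125m")
    prefix = tf.constant([[2, 100, 200]])
    out = model(prefix, use_cache=True)
    next_token = tf.constant([[300]])
    # the incremental step feeds only the new token, plus a mask covering prefix + new token
    incremental = model(
        next_token,
        attention_mask=tf.ones((1, 4), dtype=tf.int32),
        past_key_values=out.past_key_values,
    )
    full = model(tf.concat([prefix, next_token], axis=-1))
    tf.debugging.assert_near(incremental.logits[:, -1], full.logits[:, -1], rtol=1e-3)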
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size

                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
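# A condensed sketch of what the loop above verifies: `resize_token_embeddings` changes
# only the vocabulary dimension and preserves the overlapping rows. The checkpoint name
# is illustrative, not part of the original test.
def _example_resize_preserves_weights():
    model = TFOPTForCausalLM.from_pretrained("facebook/opt-125m")
    old = tf.identity(model.get_input_embeddings().weight)
    model.resize_token_embeddings(old.shape[0] + 10)
    new = model.get_input_embeddings().weight
    assert new.shape[0] == old.shape[0] + 10
    # rows that existed before the resize are carried over unchanged
    tf.debugging.assert_equal(old, new[: old.shape[0]])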
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
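# Minimal sketch of the XLA pattern exercised above: wrap the callable in
# tf.function(jit_compile=True) and compare against the eager result. The looser
# tolerance mirrors the tests, which accept larger numerical drift under XLA.
def _example_eager_vs_xla(model, input_ids, attention_mask):
    eager = model(input_ids, attention_mask=attention_mask)[0]
    xla_model = tf.function(model, jit_compile=True)
    compiled = xla_model(input_ids, attention_mask)[0]
    tf.debugging.assert_near(eager, compiled, atol=4e-2)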
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"

        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int32)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
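    # Why the test above sets the padding side to "left": decoder-only models continue
    # generation from the final input position, so padding must sit on the left; right
    # padding would make the model continue from pad tokens. A hedged, condensed sketch:
    def _example_left_padded_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-350m")
        tokenizer.padding_side = "left"
        model = TFOPTForCausalLM.from_pretrained("facebook/opt-350m")
        batch = tokenizer(["Hello, my dog is a little", "Today, I"], return_tensors="tf", padding=True)
        outputs = model.generate(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"])
        return tokenizer.batch_decode(outputs, skip_special_tokens=True)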
    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 360 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
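# A short sketch of what `attribute_map` buys you: canonical names such as `hidden_size`
# transparently alias the CTRL-specific fields, in both directions (reads and writes).
def _example_attribute_map_aliases():
    config = CTRLConfig(n_embd=1280, n_layer=48)
    assert config.hidden_size == 1280  # alias for n_embd
    assert config.num_hidden_layers == 48  # alias for n_layer
    config.hidden_size = 256  # writes through to n_embd
    assert config.n_embd == 256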
| 23 | 0 |
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class A ( _lowerCAmelCase , unittest.TestCase ):
# TODO: is there an appropriate internal test set?
__UpperCAmelCase : int = "ssube/stable-diffusion-x4-upscaler-onnx"
def lowercase_ (self : Any , __UpperCAmelCase : Optional[int]=0 ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(_lowercase ) )
UpperCAmelCase__ = torch.manual_seed(_lowercase )
UpperCAmelCase__ = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def lowercase_ (self : Union[str, Any] ) -> str:
"""simple docstring"""
UpperCAmelCase__ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=_lowercase )
UpperCAmelCase__ = self.get_dummy_inputs()
UpperCAmelCase__ = pipe(**_lowercase ).images
UpperCAmelCase__ = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_1_2, 5_1_2, 3)
UpperCAmelCase__ = np.array(
[0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def lowercase_ (self : str ) -> int:
"""simple docstring"""
UpperCAmelCase__ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCAmelCase__ = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
UpperCAmelCase__ = self.get_dummy_inputs()
UpperCAmelCase__ = pipe(**_lowercase ).images
UpperCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
UpperCAmelCase__ = np.array(
[0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowercase_ (self : Dict ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCAmelCase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowercase )
UpperCAmelCase__ = self.get_dummy_inputs()
UpperCAmelCase__ = pipe(**_lowercase ).images
UpperCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
UpperCAmelCase__ = np.array(
[0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowercase_ (self : Any ) -> str:
"""simple docstring"""
UpperCAmelCase__ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCAmelCase__ = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowercase )
UpperCAmelCase__ = self.get_dummy_inputs()
UpperCAmelCase__ = pipe(**_lowercase ).images
UpperCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
UpperCAmelCase__ = np.array(
[0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowercase_ (self : Dict ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCAmelCase__ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowercase )
UpperCAmelCase__ = self.get_dummy_inputs()
UpperCAmelCase__ = pipe(**_lowercase ).images
UpperCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
UpperCAmelCase__ = np.array(
[0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class A ( unittest.TestCase ):
@property
def lowercase_ (self : List[Any] ) -> str:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowercase_ (self : Tuple ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = ort.SessionOptions()
UpperCAmelCase__ = False
return options
def lowercase_ (self : Any ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
UpperCAmelCase__ = init_image.resize((1_2_8, 1_2_8) )
# using the PNDM scheduler by default
UpperCAmelCase__ = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
UpperCAmelCase__ = "A fantasy landscape, trending on artstation"
UpperCAmelCase__ = torch.manual_seed(0 )
UpperCAmelCase__ = pipe(
prompt=_lowercase , image=_lowercase , guidance_scale=7.5 , num_inference_steps=1_0 , generator=_lowercase , output_type="np" , )
UpperCAmelCase__ = output.images
UpperCAmelCase__ = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
UpperCAmelCase__ = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def lowercase_ (self : List[str] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
UpperCAmelCase__ = init_image.resize((1_2_8, 1_2_8) )
UpperCAmelCase__ = LMSDiscreteScheduler.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , subfolder="scheduler" )
UpperCAmelCase__ = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , scheduler=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
UpperCAmelCase__ = "A fantasy landscape, trending on artstation"
UpperCAmelCase__ = torch.manual_seed(0 )
UpperCAmelCase__ = pipe(
prompt=_lowercase , image=_lowercase , guidance_scale=7.5 , num_inference_steps=2_0 , generator=_lowercase , output_type="np" , )
UpperCAmelCase__ = output.images
UpperCAmelCase__ = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
UpperCAmelCase__ = np.array(
[0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
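# Condensed usage sketch of the pipeline exercised above (checkpoint and image URL taken
# from the tests): a 128x128 input is upscaled 4x to 512x512. This is illustrative, not
# part of the original test file.
def _example_onnx_upscale():
    pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
        "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider"
    )
    low_res = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/img2img/sketch-mountains-input.jpg"
    ).resize((128, 128))
    image = pipe(
        prompt="A fantasy landscape, trending on artstation",
        image=low_res,
        num_inference_steps=10,
        generator=torch.manual_seed(0),
    ).images[0]
    assert image.size == (512, 512)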
| 65 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
return 0
if __name__ == "__main__":
raise SystemExit(main())
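# Hedged usage sketch: the same parser can also be driven programmatically, which is
# roughly what the `accelerate env` CLI entry point does under the hood.
def _example_programmatic_env():
    parser = env_command_parser()
    args = parser.parse_args([])  # or parser.parse_args(["--config_file", "path/to/config.yaml"])
    info = env_command(args)  # prints the report and returns the collected dict
    return info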
| 332 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
| 12 | 0 |
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]
if __name__ == "__main__":
print(f'{solution() = }')
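    # Sanity check from the Project Euler 114 problem statement: a row of length
    # seven admits exactly seventeen block arrangements.
    assert solution(7) == 17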
| 111 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
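# The manual `samples_seen` bookkeeping in `training_function` exists because distributed
# evaluation pads the last batch with duplicated samples. As the inline comments note,
# `Accelerator.gather_for_metrics` performs that truncation automatically; a hedged sketch:
def eval_step_with_gather_for_metrics(accelerator, model, batch, metric):
    with torch.no_grad():
        outputs = model(**batch)
    predictions = outputs.logits.argmax(dim=-1)
    # drops the duplicated tail of the last batch for you
    predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
    metric.add_batch(predictions=predictions, references=references)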
| 95 | 0 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase : str ) -> Any:
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
__lowerCAmelCase : Tuple = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(_a )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : int = """sshleifer/tiny-gpt2"""
__lowerCAmelCase : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_a , multi_process=_a , )
__lowerCAmelCase : Optional[int] = TensorFlowBenchmark(_a )
__lowerCAmelCase : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = """sgugger/tiny-distilbert-classification"""
__lowerCAmelCase : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , only_pretrain_model=_a , )
__lowerCAmelCase : Optional[Any] = TensorFlowBenchmark(_a )
__lowerCAmelCase : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Any = """sshleifer/tiny-gpt2"""
__lowerCAmelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__lowerCAmelCase : int = TensorFlowBenchmark(_a )
__lowerCAmelCase : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : str = """sshleifer/tiny-gpt2"""
__lowerCAmelCase : Tuple = AutoConfig.from_pretrained(_a )
__lowerCAmelCase : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_a , multi_process=_a , )
__lowerCAmelCase : str = TensorFlowBenchmark(_a , [config] )
__lowerCAmelCase : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : List[str] = """sshleifer/tiny-gpt2"""
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_a )
__lowerCAmelCase : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__lowerCAmelCase : Optional[int] = TensorFlowBenchmark(_a , [config] )
__lowerCAmelCase : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = """sshleifer/tiny-gpt2"""
__lowerCAmelCase : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__lowerCAmelCase : str = TensorFlowBenchmark(_a )
__lowerCAmelCase : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = """sshleifer/tiny-gpt2"""
__lowerCAmelCase : List[str] = AutoConfig.from_pretrained(_a )
__lowerCAmelCase : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__lowerCAmelCase : int = TensorFlowBenchmark(_a , [config] )
__lowerCAmelCase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = """patrickvonplaten/t5-tiny-random"""
__lowerCAmelCase : str = AutoConfig.from_pretrained(_a )
__lowerCAmelCase : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__lowerCAmelCase : Union[str, Any] = TensorFlowBenchmark(_a , configs=[config] )
__lowerCAmelCase : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , """Cannot do xla on CPU.""" )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = """sshleifer/tiny-gpt2"""
__lowerCAmelCase : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , use_xla=_a , multi_process=_a , )
__lowerCAmelCase : Optional[Any] = TensorFlowBenchmark(_a )
__lowerCAmelCase : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCAmelCase : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_a , save_to_csv=_a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_a , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(_a , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(_a , """env.csv""" ) , multi_process=_a , )
__lowerCAmelCase : Dict = TensorFlowBenchmark(_a )
benchmark.run()
self.assertTrue(Path(os.path.join(_a , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_a , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_a , """env.csv""" ) ).exists() )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Tuple = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(lowerCAmelCase : Tuple ):
self.assertTrue(hasattr(_a , """sequential""" ) )
self.assertTrue(hasattr(_a , """cumulative""" ) )
self.assertTrue(hasattr(_a , """current""" ) )
self.assertTrue(hasattr(_a , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCAmelCase : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_a , """log.txt""" ) , log_print=_a , trace_memory_line_by_line=_a , eager_mode=_a , multi_process=_a , )
__lowerCAmelCase : Any = TensorFlowBenchmark(_a )
__lowerCAmelCase : List[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(_a , """log.txt""" ) ).exists() )
| 361 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase : str , lowerCAmelCase : List[Any]=13 , lowerCAmelCase : List[str]=7 , lowerCAmelCase : int=False , lowerCAmelCase : List[str]=True , lowerCAmelCase : List[Any]=False , lowerCAmelCase : List[str]=False , lowerCAmelCase : Optional[int]=19 , lowerCAmelCase : Any=32 , lowerCAmelCase : int=5 , lowerCAmelCase : Optional[Any]=4 , lowerCAmelCase : Optional[Any]=37 , lowerCAmelCase : Dict="gelu" , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : List[str]=5_12 , lowerCAmelCase : Dict=16 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Union[str, Any]=0.02 , lowerCAmelCase : Dict=3 , lowerCAmelCase : List[Any]=4 , lowerCAmelCase : Union[str, Any]=None , ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : List[str] = parent
__lowerCAmelCase : Tuple = batch_size
__lowerCAmelCase : Union[str, Any] = seq_length
__lowerCAmelCase : int = is_training
__lowerCAmelCase : Dict = use_input_mask
__lowerCAmelCase : Dict = use_token_type_ids
__lowerCAmelCase : Optional[int] = use_labels
__lowerCAmelCase : str = vocab_size
__lowerCAmelCase : Optional[int] = hidden_size
__lowerCAmelCase : Union[str, Any] = num_hidden_layers
__lowerCAmelCase : Union[str, Any] = num_attention_heads
__lowerCAmelCase : Tuple = intermediate_size
__lowerCAmelCase : List[str] = hidden_act
__lowerCAmelCase : Optional[int] = hidden_dropout_prob
__lowerCAmelCase : Any = attention_probs_dropout_prob
__lowerCAmelCase : List[str] = max_position_embeddings
__lowerCAmelCase : Any = type_vocab_size
__lowerCAmelCase : Union[str, Any] = type_sequence_label_size
__lowerCAmelCase : List[str] = initializer_range
__lowerCAmelCase : int = num_labels
__lowerCAmelCase : Tuple = num_choices
__lowerCAmelCase : str = scope
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : int = None
if self.use_input_mask:
__lowerCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : Any = None
__lowerCAmelCase : Optional[Any] = None
__lowerCAmelCase : Optional[int] = None
if self.use_labels:
__lowerCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase : Tuple = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Tuple = EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=lowerCAmelCase , esmfold_config={"""trunk""": {"""num_blocks""": 2}, """fp16_esm""": False} , )
return config
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : Dict ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : List[Any] = EsmForProteinFolding(config=lowerCAmelCase ).float()
model.to(lowerCAmelCase )
model.eval()
__lowerCAmelCase : Any = model(lowerCAmelCase , attention_mask=lowerCAmelCase )
__lowerCAmelCase : str = model(lowerCAmelCase )
__lowerCAmelCase : Optional[int] = model(lowerCAmelCase )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
(
(
__lowerCAmelCase
) ,(
__lowerCAmelCase
) ,(
__lowerCAmelCase
) ,(
__lowerCAmelCase
) ,(
__lowerCAmelCase
) ,(
__lowerCAmelCase
) ,
) : Optional[int] = config_and_inputs
__lowerCAmelCase : int = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Tuple =False
lowerCamelCase : int =(EsmForProteinFolding,) if is_torch_available() else ()
lowerCamelCase : int =()
lowerCamelCase : Dict ={} if is_torch_available() else {}
lowerCamelCase : str =False
def SCREAMING_SNAKE_CASE ( self : int ) -> Any:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = EsmFoldModelTester(self )
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


@require_torch
class EsmFoldModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
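For context, a minimal usage sketch of the model exercised above, following the documented ESMFold pattern; the protein sequence is an arbitrary example, and the shape inspection at the end is illustrative rather than part of the test suite.

import torch
from transformers import AutoTokenizer, EsmForProteinFolding

tokenizer = AutoTokenizer.from_pretrained("facebook/esmfold_v1")
model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
model.eval()

# An arbitrary short protein sequence (single-letter amino-acid codes);
# ESMFold tokenization is documented with add_special_tokens=False.
inputs = tokenizer(["MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ"], return_tensors="pt", add_special_tokens=False)
with torch.no_grad():
    outputs = model(**inputs)

# "positions" stacks the structure module's predicted atom coordinates across
# refinement iterations; the integration test above checks one scalar slice of it.
print(outputs["positions"].shape)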
| 139 | 0 |
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
@slow
@require_torch
def A_ ( self ):
_lowerCamelCase : List[str] = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' )
_lowerCamelCase : Dict = BertTokenizer.from_pretrained('bert-base-uncased' )
_lowerCamelCase : Optional[Any] = bertabert.config.encoder.vocab_size
_lowerCamelCase : int = tokenizer.sep_token_id
_lowerCamelCase : Any = tokenizer.cls_token_id
_lowerCamelCase : Tuple = 128
_lowerCamelCase : Dict = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' )
_lowerCamelCase : List[str] = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' )
_lowerCamelCase : int = train_dataset.select(range(32 ) )
_lowerCamelCase : int = val_dataset.select(range(16 ) )
_lowerCamelCase : Dict = 4
def _map_to_encoder_decoder_inputs(lowercase ):
# Tokenizer will automatically set [BOS] <text> [EOS]
_lowerCamelCase : Optional[int] = tokenizer(batch['article'] , padding='max_length' , truncation=_a , max_length=512 )
_lowerCamelCase : Union[str, Any] = tokenizer(batch['highlights'] , padding='max_length' , truncation=_a , max_length=128 )
_lowerCamelCase : Union[str, Any] = inputs.input_ids
_lowerCamelCase : List[Any] = inputs.attention_mask
_lowerCamelCase : List[Any] = outputs.input_ids
_lowerCamelCase : List[Any] = outputs.input_ids.copy()
_lowerCamelCase : List[Any] = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
]
_lowerCamelCase : int = outputs.attention_mask
assert all(len(_a ) == 512 for x in inputs.input_ids )
assert all(len(_a ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(lowercase ):
_lowerCamelCase : List[Any] = pred.label_ids
_lowerCamelCase : List[Any] = pred.predictions
# all unnecessary tokens are removed
_lowerCamelCase : Any = tokenizer.batch_decode(_a , skip_special_tokens=_a )
_lowerCamelCase : Any = tokenizer.batch_decode(_a , skip_special_tokens=_a )
_lowerCamelCase : str = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_a ) )] ) / len(_a )
return {"accuracy": accuracy}
# map train dataset
_lowerCamelCase : Tuple = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_a , batch_size=_a , remove_columns=['article', 'highlights'] , )
train_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
# same for validation dataset
_lowerCamelCase : Union[str, Any] = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_a , batch_size=_a , remove_columns=['article', 'highlights'] , )
val_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
_lowerCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
_lowerCamelCase : str = SeqaSeqTrainingArguments(
output_dir=_a , per_device_train_batch_size=_a , per_device_eval_batch_size=_a , predict_with_generate=_a , evaluation_strategy='steps' , do_train=_a , do_eval=_a , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
_lowerCamelCase : List[Any] = SeqaSeqTrainer(
model=_a , args=_a , compute_metrics=_compute_metrics , train_dataset=_a , eval_dataset=_a , tokenizer=_a , )
# start training
trainer.train() | 96 |
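The -100 substitution in the mapping function above is what keeps padding out of the loss: PyTorch's CrossEntropyLoss ignores any target equal to -100. A self-contained sketch of just that step (the helper name is illustrative, not from the test):

def mask_pad_tokens(label_ids, pad_token_id):
    """Replace pad-token ids with -100 so the cross-entropy loss skips them."""
    return [[-100 if tok == pad_token_id else tok for tok in seq] for seq in label_ids]

print(mask_pad_tokens([[101, 7592, 102, 0, 0]], pad_token_id=0))
# [[101, 7592, 102, -100, -100]]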
import unittest

import numpy as np

from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask

if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
        FlaxRobertaPreLayerNormForCausalLM,
        FlaxRobertaPreLayerNormForMaskedLM,
        FlaxRobertaPreLayerNormForMultipleChoice,
        FlaxRobertaPreLayerNormForQuestionAnswering,
        FlaxRobertaPreLayerNormForSequenceClassification,
        FlaxRobertaPreLayerNormForTokenClassification,
        FlaxRobertaPreLayerNormModel,
    )


class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
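A hedged usage sketch turning the masked-LM head checked above into a fill-mask prediction. It assumes the andreasmadsen/efficient_mlm_m0.40 checkpoint ships a compatible RoBERTa-style tokenizer, which the tests themselves do not verify.

import numpy as np
from transformers import AutoTokenizer, FlaxRobertaPreLayerNormForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("andreasmadsen/efficient_mlm_m0.40")
model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)

inputs = tokenizer("The capital of France is <mask>.", return_tensors="np")
logits = model(**inputs).logits

# Locate the <mask> position and decode the highest-scoring vocabulary entry.
mask_index = int(np.argwhere(inputs["input_ids"][0] == tokenizer.mask_token_id)[0, 0])
predicted_id = int(logits[0, mask_index].argmax(-1))
print(tokenizer.decode([predicted_id]))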
| 281 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_distilbert"] = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_distilbert"] = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_distilbert"] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
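A stripped-down illustration of the lazy-import pattern this file relies on. This is a simplified sketch, not the real transformers.utils._LazyModule (which also handles __dir__, pickling, and the module spec): attribute access triggers the submodule import only on first use, so importing the package stays cheap.

import importlib
import types

class SimpleLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Invert {submodule: [names]} into {name: submodule} for O(1) lookup.
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        # Called only when normal attribute lookup fails, i.e. on first access.
        if attr not in self._class_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(submodule, attr)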
| 370 |
from __future__ import annotations
def p_series(nth_term: int, power: int) -> list[str]:
    """
    Return the P-Series as strings: 1, 1/2^p, 1/3^p, ..., 1/n^p.

    >>> p_series(5, 2)
    ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']
    >>> p_series("", 1000)
    ['']
    >>> p_series(0, 0)
    []
    """
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
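A quick numeric aside on the series itself: for p = 2 the partial sums approach pi^2/6 (the Basel problem), while for p <= 1 the series diverges. A one-off check:

from math import pi

partial = sum(1 / n**2 for n in range(1, 100_001))
print(partial)     # ~1.6449240668 (truncation error is roughly 1/100000)
print(pi**2 / 6)   # 1.6449340668...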
| 288 | 0 |
import unittest

from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin

if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor


class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = "<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>"

    html_string_2 = "\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n "

    return [html_string_1, html_string_2]


@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
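A hedged standalone usage sketch of the feature extractor exercised above (requires beautifulsoup4); the HTML is arbitrary, and the expected output in the comments is inferred from the batched case in the test rather than run here.

from transformers import MarkupLMFeatureExtractor

html = "<html><body><h1>Hello</h1><p>world</p></body></html>"
feature_extractor = MarkupLMFeatureExtractor()
encoding = feature_extractor(html)

print(encoding.nodes)   # [['Hello', 'world']]
print(encoding.xpaths)  # [['/html/body/h1', '/html/body/p']]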
| 22 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_layoutlmv3_fast"] = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_layoutlmv3"] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_layoutlmv3"] = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["feature_extraction_layoutlmv3"] = ['LayoutLMv3FeatureExtractor']
_import_structure["image_processing_layoutlmv3"] = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 284 | 0 |
import socket
def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
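For completeness, a minimal counterpart server sketch (an assumption, not part of the original snippet) that serves a file to the first client connecting on the same port:

import socket

def serve_file(filename: str, port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _addr = server.accept()
    print(conn.recv(1024))  # the client's greeting
    with open(filename, "rb") as in_file:
        # Stream the file in 1 KiB chunks; closing the connection signals EOF.
        while chunk := in_file.read(1024):
            conn.sendall(chunk)
    conn.close()
    server.close()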
| 88 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to the given number of digits using the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
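For reference, the series the code implements is Chudnovsky's 1989 formula; the constants above are exactly its coefficients, 426880 * sqrt(10005) equals 640320^(3/2) / 12, the factor -262537412640768000 is -640320^3, and each term contributes roughly 14 digits, which is why the code runs ceil(precision / 14) iterations:

$$\frac{1}{\pi} = 12 \sum_{k=0}^{\infty} \frac{(-1)^k \,(6k)!\,\left(13591409 + 545140134\,k\right)}{(3k)!\,(k!)^3\,640320^{3k+3/2}}$$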
| 88 | 1 |